-rw-r--r--  Documentation/ABI/testing/sysfs-bus-usb | 13
-rw-r--r--  Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc | 13
-rw-r--r--  Documentation/arm/OMAP/DSS | 317
-rw-r--r--  Documentation/filesystems/nilfs2.txt | 7
-rw-r--r--  Documentation/kernel-parameters.txt | 2
-rw-r--r--  Documentation/powerpc/dts-bindings/xilinx.txt | 11
-rw-r--r--  Documentation/usb/power-management.txt | 69
-rw-r--r--  MAINTAINERS | 21
-rw-r--r--  arch/alpha/kernel/osf_sys.c | 19
-rw-r--r--  arch/arm/configs/omap_3430sdp_defconfig | 28
-rw-r--r--  arch/arm/include/asm/mman.h | 3
-rw-r--r--  arch/arm/kernel/calls.S | 2
-rw-r--r--  arch/arm/kernel/entry-common.S | 4
-rw-r--r--  arch/arm/kernel/sys_arm.c | 55
-rw-r--r--  arch/arm/mach-omap1/board-nokia770.c | 2
-rw-r--r--  arch/arm/mach-omap2/board-3430sdp.c | 167
-rw-r--r--  arch/arm/mach-omap2/clock24xx.c | 8
-rw-r--r--  arch/arm/mach-omap2/clock34xx.c | 14
-rw-r--r--  arch/arm/mach-omap2/io.c | 4
-rw-r--r--  arch/arm/mach-omap2/sdrc.c | 16
-rw-r--r--  arch/arm/mm/mmap.c | 3
-rw-r--r--  arch/arm/plat-mxc/Makefile | 1
-rw-r--r--  arch/arm/plat-mxc/ehci.c | 92
-rw-r--r--  arch/arm/plat-mxc/include/mach/mxc_ehci.h | 37
-rw-r--r--  arch/arm/plat-omap/fb.c | 49
-rw-r--r--  arch/arm/plat-omap/include/plat/display.h | 575
-rw-r--r--  arch/arm/plat-omap/include/plat/sdrc.h | 9
-rw-r--r--  arch/arm/plat-omap/include/plat/vram.h | 62
-rw-r--r--  arch/arm/plat-omap/include/plat/vrfb.h | 50
-rw-r--r--  arch/arm/plat-omap/sram.c | 8
-rw-r--r--  arch/avr32/include/asm/syscalls.h | 4
-rw-r--r--  arch/avr32/kernel/sys_avr32.c | 31
-rw-r--r--  arch/avr32/kernel/syscall-stubs.S | 2
-rw-r--r--  arch/blackfin/kernel/sys_bfin.c | 33
-rw-r--r--  arch/blackfin/mach-common/entry.S | 2
-rw-r--r--  arch/cris/kernel/sys_cris.c | 30
-rw-r--r--  arch/frv/kernel/sys_frv.c | 66
-rw-r--r--  arch/h8300/kernel/sys_h8300.c | 83
-rw-r--r--  arch/h8300/kernel/syscalls.S | 2
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 3
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h | 28
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 83
-rw-r--r--  arch/ia64/pci/pci.c | 33
-rw-r--r--  arch/m32r/kernel/sys_m32r.c | 24
-rw-r--r--  arch/m32r/kernel/syscall_table.S | 2
-rw-r--r--  arch/m68k/kernel/sys_m68k.c | 83
-rw-r--r--  arch/m68knommu/kernel/sys_m68k.c | 38
-rw-r--r--  arch/m68knommu/kernel/syscalltable.S | 2
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c | 38
-rw-r--r--  arch/microblaze/kernel/syscall_table.S | 2
-rw-r--r--  arch/mips/kernel/linux32.c | 19
-rw-r--r--  arch/mips/kernel/syscall.c | 32
-rw-r--r--  arch/mn10300/include/asm/mman.h | 5
-rw-r--r--  arch/mn10300/kernel/entry.S | 2
-rw-r--r--  arch/mn10300/kernel/sys_mn10300.c | 36
-rw-r--r--  arch/parisc/kernel/sys_parisc.c | 30
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 15
-rw-r--r--  arch/s390/kernel/compat_linux.c | 37
-rw-r--r--  arch/s390/kernel/sys_s390.c | 30
-rw-r--r--  arch/score/kernel/sys_score.c | 28
-rw-r--r--  arch/sh/kernel/sys_sh.c | 28
-rw-r--r--  arch/sh/mm/mmap.c | 3
-rw-r--r--  arch/sparc/include/asm/pci_64.h | 2
-rw-r--r--  arch/sparc/kernel/pci.c | 7
-rw-r--r--  arch/sparc/kernel/sys_sparc32.c | 22
-rw-r--r--  arch/sparc/kernel/sys_sparc_32.c | 64
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 50
-rw-r--r--  arch/sparc/kernel/systbls.h | 1
-rw-r--r--  arch/sparc/kernel/systbls_32.S | 4
-rw-r--r--  arch/sparc/kernel/systbls_64.S | 6
-rw-r--r--  arch/um/kernel/syscall.c | 28
-rw-r--r--  arch/um/sys-i386/shared/sysdep/syscalls.h | 4
-rw-r--r--  arch/x86/ia32/ia32entry.S | 2
-rw-r--r--  arch/x86/ia32/sys_ia32.c | 43
-rw-r--r--  arch/x86/include/asm/pci_x86.h | 20
-rw-r--r--  arch/x86/include/asm/sys_ia32.h | 3
-rw-r--r--  arch/x86/include/asm/syscalls.h | 2
-rw-r--r--  arch/x86/include/asm/xen/hypervisor.h | 27
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c | 3
-rw-r--r--  arch/x86/kernel/kgdb.c | 14
-rw-r--r--  arch/x86/kernel/sys_i386_32.c | 27
-rw-r--r--  arch/x86/kernel/sys_x86_64.c | 17
-rw-r--r--  arch/x86/kernel/syscall_table_32.S | 2
-rw-r--r--  arch/x86/pci/Makefile | 5
-rw-r--r--  arch/x86/pci/acpi.c | 74
-rw-r--r--  arch/x86/pci/amd_bus.c | 120
-rw-r--r--  arch/x86/pci/bus_numa.c | 101
-rw-r--r--  arch/x86/pci/bus_numa.h | 27
-rw-r--r--  arch/x86/pci/common.c | 20
-rw-r--r--  arch/x86/pci/early.c | 7
-rw-r--r--  arch/x86/pci/i386.c | 42
-rw-r--r--  arch/x86/pci/intel_bus.c | 90
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 356
-rw-r--r--  arch/x86/pci/mmconfig_32.c | 16
-rw-r--r--  arch/x86/pci/mmconfig_64.c | 88
-rw-r--r--  arch/x86/xen/enlighten.c | 6
-rw-r--r--  arch/xtensa/include/asm/syscall.h | 2
-rw-r--r--  arch/xtensa/include/asm/unistd.h | 2
-rw-r--r--  arch/xtensa/kernel/syscall.c | 25
-rw-r--r--  drivers/acpi/Makefile | 1
-rw-r--r--  drivers/acpi/hest.c | 135
-rw-r--r--  drivers/block/xen-blkfront.c | 1
-rw-r--r--  drivers/bluetooth/btusb.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.c | 103
-rw-r--r--  drivers/char/hvc_xen.c | 2
-rw-r--r--  drivers/edac/amd64_edac.c | 1251
-rw-r--r--  drivers/edac/amd64_edac.h | 62
-rw-r--r--  drivers/edac/edac_core.h | 1
-rw-r--r--  drivers/edac/edac_mc.c | 24
-rw-r--r--  drivers/edac/edac_mce_amd.c | 2
-rw-r--r--  drivers/firewire/ohci.c | 12
-rw-r--r--  drivers/gpu/drm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 176
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_dp_i2c_helper.c (renamed from drivers/gpu/drm/i915/intel_dp_i2c.c) | 76
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 42
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 328
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 23
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 112
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 130
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 110
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 28
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 15
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 4
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 531
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_mode.c | 473
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_priv.h | 344
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 2
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7017.c | 9
-rw-r--r--  drivers/gpu/drm/i915/dvo_ch7xxx.c | 16
-rw-r--r--  drivers/gpu/drm/i915/dvo_ivch.c | 37
-rw-r--r--  drivers/gpu/drm/i915/dvo_sil164.c | 20
-rw-r--r--  drivers/gpu/drm/i915/dvo_tfp410.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 120
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 40
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 114
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 163
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 92
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 71
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 86
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 137
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 1036
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 162
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 44
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c | 21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 140
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 1416
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 58
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 44
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 125
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 6095
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 289
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 671
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 478
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 468
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 824
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 54
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_crtc.h | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 155
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 206
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.h | 157
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 569
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 405
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 1286
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 91
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fb.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 380
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.h | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 262
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 992
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 1080
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.h | 455
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 269
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.h | 52
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ioc32.c | 72
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 702
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 568
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 196
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 1294
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 836
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 321
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 811
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 131
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 1002
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_cursor.c | 70
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 528
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 621
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 288
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fb.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fbcon.c | 316
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 271
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 579
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 208
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_mc.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_timer.c | 51
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 305
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fb.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 260
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 892
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_gpio.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 681
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.h | 156
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv_modes.c | 583
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 780
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fb.c | 62
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 314
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 560
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mc.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 769
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_cursor.c | 156
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 304
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 1015
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_evo.h | 113
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fbcon.c | 273
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 494
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 385
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 509
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_mc.c | 40
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 309
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h | 535
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 2
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 59
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 790
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 245
-rw-r--r--  drivers/gpu/drm/radeon/r100_track.h | 10
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 33
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1147
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_kms.c | 34
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 212
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 165
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 70
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 332
-rw-r--r--  drivers/gpu/drm/radeon/radeon_benchmark.c | 36
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 688
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 194
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 45
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 62
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 145
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 276
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 72
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fence.c | 47
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fixed.h | 17
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gart.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 182
-rw-r--r--  drivers/gpu/drm/radeon/radeon_irq_kms.c | 61
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 104
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 125
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 149
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 565
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 157
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 60
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_test.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 94
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 17
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 236
-rw-r--r--  drivers/gpu/drm/radeon/rs600d.h | 112
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 24
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 79
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 649
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 1
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 7
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 117
-rw-r--r--  drivers/gpu/drm/ttm/ttm_lock.c | 311
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c | 16
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 452
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 1
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 8
-rw-r--r--  drivers/input/xen-kbdfront.c | 3
-rw-r--r--  drivers/misc/kgdbts.c | 14
-rw-r--r--  drivers/net/wimax/i2400m/usb.c | 7
-rw-r--r--  drivers/net/xen-netfront.c | 1
-rw-r--r--  drivers/pci/Kconfig | 15
-rw-r--r--  drivers/pci/Makefile | 2
-rw-r--r--  drivers/pci/dmar.c | 7
-rw-r--r--  drivers/pci/hotplug/Makefile | 12
-rw-r--r--  drivers/pci/hotplug/acpi_pcihp.c | 5
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 6
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 248
-rw-r--r--  drivers/pci/hotplug/ibmphp_hpc.c | 3
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c | 22
-rw-r--r--  drivers/pci/hotplug/pciehp.h | 1
-rw-r--r--  drivers/pci/hotplug/pciehp_acpi.c | 3
-rw-r--r--  drivers/pci/hotplug/pciehp_core.c | 119
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c | 57
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c | 155
-rw-r--r--  drivers/pci/hotplug/pcihp_slot.c | 4
-rw-r--r--  drivers/pci/intel-iommu.c | 10
-rw-r--r--  drivers/pci/intr_remapping.c | 4
-rw-r--r--  drivers/pci/ioapic.c | 127
-rw-r--r--  drivers/pci/iov.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 4
-rw-r--r--  drivers/pci/pci-sysfs.c | 25
-rw-r--r--  drivers/pci/pci.c | 154
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/pcie/aer/aer_inject.c | 58
-rw-r--r--  drivers/pci/pcie/aer/aerdrv.c | 4
-rw-r--r--  drivers/pci/pcie/aer/aerdrv_core.c | 70
-rw-r--r--  drivers/pci/pcie/aer/ecrc.c | 4
-rw-r--r--  drivers/pci/pcie/aspm.c | 41
-rw-r--r--  drivers/pci/pcie/portdrv.h | 21
-rw-r--r--  drivers/pci/pcie/portdrv_bus.c | 7
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 239
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 14
-rw-r--r--  drivers/pci/probe.c | 83
-rw-r--r--  drivers/pci/quirks.c | 40
-rw-r--r--  drivers/pci/search.c | 38
-rw-r--r--  drivers/pci/setup-bus.c | 112
-rw-r--r--  drivers/pci/setup-res.c | 68
-rw-r--r--  drivers/pcmcia/cardbus.c | 23
-rw-r--r--  drivers/pnp/quirks.c | 13
-rw-r--r--  drivers/pnp/resource.c | 10
-rw-r--r--  drivers/pnp/support.c | 43
-rw-r--r--  drivers/pnp/system.c | 14
-rw-r--r--  drivers/staging/Kconfig | 2
-rw-r--r--  drivers/usb/Kconfig | 2
-rw-r--r--  drivers/usb/Makefile | 2
-rw-r--r--  drivers/usb/class/cdc-acm.c | 58
-rw-r--r--  drivers/usb/class/usbtmc.c | 54
-rw-r--r--  drivers/usb/core/driver.c | 135
-rw-r--r--  drivers/usb/core/file.c | 1
-rw-r--r--  drivers/usb/core/generic.c | 2
-rw-r--r--  drivers/usb/core/hcd.c | 82
-rw-r--r--  drivers/usb/core/hcd.h | 19
-rw-r--r--  drivers/usb/core/hub.c | 142
-rw-r--r--  drivers/usb/core/message.c | 82
-rw-r--r--  drivers/usb/core/sysfs.c | 61
-rw-r--r--  drivers/usb/core/urb.c | 22
-rw-r--r--  drivers/usb/core/usb.c | 67
-rw-r--r--  drivers/usb/core/usb.h | 1
-rw-r--r--  drivers/usb/gadget/Kconfig | 60
-rw-r--r--  drivers/usb/gadget/Makefile | 4
-rw-r--r--  drivers/usb/gadget/at91_udc.c | 6
-rw-r--r--  drivers/usb/gadget/audio.c | 115
-rw-r--r--  drivers/usb/gadget/composite.c | 59
-rw-r--r--  drivers/usb/gadget/ether.c | 16
-rw-r--r--  drivers/usb/gadget/f_acm.c | 28
-rw-r--r--  drivers/usb/gadget/f_audio.c | 76
-rw-r--r--  drivers/usb/gadget/f_mass_storage.c | 3091
-rw-r--r--  drivers/usb/gadget/f_rndis.c | 35
-rw-r--r--  drivers/usb/gadget/file_storage.c | 879
-rw-r--r--  drivers/usb/gadget/mass_storage.c | 240
-rw-r--r--  drivers/usb/gadget/multi.c | 358
-rw-r--r--  drivers/usb/gadget/storage_common.c | 778
-rw-r--r--  drivers/usb/gadget/u_ether.h | 2
-rw-r--r--  drivers/usb/host/Kconfig | 22
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 19
-rw-r--r--  drivers/usb/host/ehci-hub.c | 2
-rw-r--r--  drivers/usb/host/ehci-mxc.c | 296
-rw-r--r--  drivers/usb/host/ehci-omap.c | 756
-rw-r--r--  drivers/usb/host/ehci-q.c | 32
-rw-r--r--  drivers/usb/host/ehci-sched.c | 36
-rw-r--r--  drivers/usb/host/ehci-xilinx-of.c | 300
-rw-r--r--  drivers/usb/host/isp1362.h | 4
-rw-r--r--  drivers/usb/host/ohci-at91.c | 10
-rw-r--r--  drivers/usb/host/ohci-pnx4008.c | 8
-rw-r--r--  drivers/usb/host/r8a66597-hcd.c | 4
-rw-r--r--  drivers/usb/host/whci/debug.c | 20
-rw-r--r--  drivers/usb/host/whci/hcd.c | 1
-rw-r--r--  drivers/usb/host/whci/qset.c | 350
-rw-r--r--  drivers/usb/host/whci/whcd.h | 9
-rw-r--r--  drivers/usb/host/whci/whci-hc.h | 14
-rw-r--r--  drivers/usb/host/xhci-hcd.c | 139
-rw-r--r--  drivers/usb/host/xhci-mem.c | 255
-rw-r--r--  drivers/usb/host/xhci-pci.c | 2
-rw-r--r--  drivers/usb/host/xhci-ring.c | 343
-rw-r--r--  drivers/usb/host/xhci.h | 37
-rw-r--r--  drivers/usb/misc/usbtest.c | 7
-rw-r--r--  drivers/usb/mon/mon_bin.c | 51
-rw-r--r--  drivers/usb/mon/mon_text.c | 23
-rw-r--r--  drivers/usb/musb/Kconfig | 5
-rw-r--r--  drivers/usb/musb/blackfin.c | 16
-rw-r--r--  drivers/usb/musb/blackfin.h | 37
-rw-r--r--  drivers/usb/musb/musb_core.c | 10
-rw-r--r--  drivers/usb/musb/musb_core.h | 8
-rw-r--r--  drivers/usb/musb/musb_dma.h | 11
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 196
-rw-r--r--  drivers/usb/musb/musb_gadget.h | 4
-rw-r--r--  drivers/usb/musb/musb_gadget_ep0.c | 70
-rw-r--r--  drivers/usb/musb/musb_host.c | 10
-rw-r--r--  drivers/usb/musb/musb_regs.h | 4
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 12
-rw-r--r--  drivers/usb/musb/omap2430.c | 2
-rw-r--r--  drivers/usb/otg/Kconfig | 9
-rw-r--r--  drivers/usb/otg/Makefile | 1
-rw-r--r--  drivers/usb/otg/twl4030-usb.c | 4
-rw-r--r--  drivers/usb/otg/ulpi.c | 136
-rw-r--r--  drivers/usb/serial/ark3116.c | 975
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 32
-rw-r--r--  drivers/usb/serial/ftdi_sio.h | 14
-rw-r--r--  drivers/usb/serial/mos7840.c | 24
-rw-r--r--  drivers/usb/serial/option.c | 39
-rw-r--r--  drivers/usb/serial/sierra.c | 91
-rw-r--r--  drivers/usb/storage/scsiglue.c | 3
-rw-r--r--  drivers/usb/storage/transport.c | 17
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/usb/storage/usb.c | 43
-rw-r--r--  drivers/usb/storage/usb.h | 1
-rw-r--r--  drivers/usb/usb-skeleton.c | 4
-rw-r--r--  drivers/usb/wusbcore/devconnect.c | 7
-rw-r--r--  drivers/usb/wusbcore/security.c | 6
-rw-r--r--  drivers/usb/wusbcore/wusbhc.c | 32
-rw-r--r--  drivers/usb/wusbcore/wusbhc.h | 1
-rw-r--r--  drivers/video/Kconfig | 1
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/omap/Kconfig | 5
-rw-r--r--  drivers/video/omap/blizzard.c | 2
-rw-r--r--  drivers/video/omap/dispc.c | 21
-rw-r--r--  drivers/video/omap/hwa742.c | 3
-rw-r--r--  drivers/video/omap/lcd_2430sdp.c | 3
-rw-r--r--  drivers/video/omap/lcd_ams_delta.c | 3
-rw-r--r--  drivers/video/omap/lcd_apollon.c | 3
-rw-r--r--  drivers/video/omap/lcd_h3.c | 2
-rw-r--r--  drivers/video/omap/lcd_h4.c | 2
-rw-r--r--  drivers/video/omap/lcd_htcherald.c | 2
-rw-r--r--  drivers/video/omap/lcd_inn1510.c | 2
-rw-r--r--  drivers/video/omap/lcd_inn1610.c | 2
-rw-r--r--  drivers/video/omap/lcd_ldp.c | 3
-rw-r--r--  drivers/video/omap/lcd_mipid.c | 3
-rw-r--r--  drivers/video/omap/lcd_omap2evm.c | 3
-rw-r--r--  drivers/video/omap/lcd_omap3beagle.c | 4
-rw-r--r--  drivers/video/omap/lcd_omap3evm.c | 3
-rw-r--r--  drivers/video/omap/lcd_osk.c | 2
-rw-r--r--  drivers/video/omap/lcd_overo.c | 3
-rw-r--r--  drivers/video/omap/lcd_palmte.c | 2
-rw-r--r--  drivers/video/omap/lcd_palmtt.c | 2
-rw-r--r--  drivers/video/omap/lcd_palmz71.c | 2
-rw-r--r--  drivers/video/omap/lcdc.c | 3
-rw-r--r--  drivers/video/omap/omapfb.h (renamed from arch/arm/plat-omap/include/plat/omapfb.h) | 191
-rw-r--r--  drivers/video/omap/omapfb_main.c | 2
-rw-r--r--  drivers/video/omap/rfbi.c | 3
-rw-r--r--  drivers/video/omap/sossi.c | 3
-rw-r--r--  drivers/video/omap2/Kconfig | 9
-rw-r--r--  drivers/video/omap2/Makefile | 6
-rw-r--r--  drivers/video/omap2/displays/Kconfig | 22
-rw-r--r--  drivers/video/omap2/displays/Makefile | 4
-rw-r--r--  drivers/video/omap2/displays/panel-generic.c | 104
-rw-r--r--  drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c | 153
-rw-r--r--  drivers/video/omap2/displays/panel-taal.c | 1003
-rw-r--r--  drivers/video/omap2/dss/Kconfig | 89
-rw-r--r--  drivers/video/omap2/dss/Makefile | 6
-rw-r--r--  drivers/video/omap2/dss/core.c | 919
-rw-r--r--  drivers/video/omap2/dss/dispc.c | 3091
-rw-r--r--  drivers/video/omap2/dss/display.c | 671
-rw-r--r--  drivers/video/omap2/dss/dpi.c | 399
-rw-r--r--  drivers/video/omap2/dss/dsi.c | 3710
-rw-r--r--  drivers/video/omap2/dss/dss.c | 596
-rw-r--r--  drivers/video/omap2/dss/dss.h | 370
-rw-r--r--  drivers/video/omap2/dss/manager.c | 1487
-rw-r--r--  drivers/video/omap2/dss/overlay.c | 680
-rw-r--r--  drivers/video/omap2/dss/rfbi.c | 1309
-rw-r--r--  drivers/video/omap2/dss/sdi.c | 277
-rw-r--r--  drivers/video/omap2/dss/venc.c | 797
-rw-r--r--  drivers/video/omap2/omapfb/Kconfig | 37
-rw-r--r--  drivers/video/omap2/omapfb/Makefile | 2
-rw-r--r--  drivers/video/omap2/omapfb/omapfb-ioctl.c | 755
-rw-r--r--  drivers/video/omap2/omapfb/omapfb-main.c | 2261
-rw-r--r--  drivers/video/omap2/omapfb/omapfb-sysfs.c | 507
-rw-r--r--  drivers/video/omap2/omapfb/omapfb.h | 146
-rw-r--r--  drivers/video/omap2/vram.c | 655
-rw-r--r--  drivers/video/omap2/vrfb.c | 315
-rw-r--r--  drivers/video/xen-fbfront.c | 3
-rw-r--r--  drivers/xen/balloon.c | 2
-rw-r--r--  drivers/xen/cpu_hotplug.c | 1
-rw-r--r--  drivers/xen/evtchn.c | 2
-rw-r--r--  drivers/xen/grant-table.c | 1
-rw-r--r--  drivers/xen/sys-hypervisor.c | 1
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c | 2
-rw-r--r--  drivers/xen/xenfs/super.c | 2
-rw-r--r--  fs/nilfs2/alloc.c | 108
-rw-r--r--  fs/nilfs2/alloc.h | 21
-rw-r--r--  fs/nilfs2/bmap.c | 8
-rw-r--r--  fs/nilfs2/btnode.c | 76
-rw-r--r--  fs/nilfs2/btnode.h | 6
-rw-r--r--  fs/nilfs2/btree.c | 106
-rw-r--r--  fs/nilfs2/btree.h | 22
-rw-r--r--  fs/nilfs2/cpfile.c | 26
-rw-r--r--  fs/nilfs2/cpfile.h | 3
-rw-r--r--  fs/nilfs2/dat.c | 47
-rw-r--r--  fs/nilfs2/dat.h | 3
-rw-r--r--  fs/nilfs2/dir.c | 24
-rw-r--r--  fs/nilfs2/gcdat.c | 3
-rw-r--r--  fs/nilfs2/gcinode.c | 6
-rw-r--r--  fs/nilfs2/ifile.c | 35
-rw-r--r--  fs/nilfs2/ifile.h | 2
-rw-r--r--  fs/nilfs2/inode.c | 7
-rw-r--r--  fs/nilfs2/mdt.c | 56
-rw-r--r--  fs/nilfs2/mdt.h | 25
-rw-r--r--  fs/nilfs2/namei.c | 83
-rw-r--r--  fs/nilfs2/recovery.c | 34
-rw-r--r--  fs/nilfs2/segbuf.c | 185
-rw-r--r--  fs/nilfs2/segbuf.h | 54
-rw-r--r--  fs/nilfs2/segment.c | 369
-rw-r--r--  fs/nilfs2/segment.h | 2
-rw-r--r--  fs/nilfs2/sufile.c | 203
-rw-r--r--  fs/nilfs2/sufile.h | 14
-rw-r--r--  fs/nilfs2/super.c | 88
-rw-r--r--  fs/nilfs2/the_nilfs.c | 155
-rw-r--r--  fs/nilfs2/the_nilfs.h | 10
-rw-r--r--  include/acpi/acpi_hest.h | 12
-rw-r--r--  include/drm/Kbuild | 1
-rw-r--r--  include/drm/drm.h | 65
-rw-r--r--  include/drm/drmP.h | 87
-rw-r--r--  include/drm/drm_crtc.h | 47
-rw-r--r--  include/drm/drm_dp_helper.h (renamed from drivers/gpu/drm/i915/intel_dp.h) | 74
-rw-r--r--  include/drm/drm_edid.h | 8
-rw-r--r--  include/drm/drm_mm.h | 35
-rw-r--r--  include/drm/drm_mode.h | 80
-rw-r--r--  include/drm/drm_os_linux.h | 2
-rw-r--r--  include/drm/i2c/ch7006.h | 86
-rw-r--r--  include/drm/i915_drm.h | 78
-rw-r--r--  include/drm/mga_drm.h | 2
-rw-r--r--  include/drm/nouveau_drm.h | 220
-rw-r--r--  include/drm/radeon_drm.h | 2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 115
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h | 37
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 107
-rw-r--r--  include/drm/ttm/ttm_lock.h | 247
-rw-r--r--  include/drm/ttm/ttm_memory.h | 1
-rw-r--r--  include/drm/ttm/ttm_object.h | 267
-rw-r--r--  include/drm/via_drm.h | 2
-rw-r--r--  include/linux/nilfs2_fs.h | 24
-rw-r--r--  include/linux/omapfb.h | 251
-rw-r--r--  include/linux/pci.h | 42
-rw-r--r--  include/linux/pci_regs.h | 18
-rw-r--r--  include/linux/pcieport_if.h | 16
-rw-r--r--  include/linux/syscalls.h | 4
-rw-r--r--  include/linux/usb.h | 33
-rw-r--r--  include/linux/usb/composite.h | 1
-rw-r--r--  include/linux/usb/otg.h | 68
-rw-r--r--  include/linux/usb/ulpi.h | 7
-rw-r--r--  include/linux/usb_usual.h | 4
-rw-r--r--  include/xen/xen.h | 32
-rw-r--r--  ipc/shm.c | 31
-rw-r--r--  kernel/kgdb.c | 56
-rw-r--r--  kernel/resource.c | 26
-rw-r--r--  lib/vsprintf.c | 87
-rw-r--r--  mm/mmap.c | 42
-rw-r--r--  mm/mremap.c | 241
-rw-r--r--  mm/util.c | 44
-rw-r--r--  scripts/mod/file2alias.c | 95
568 files changed, 83725 insertions, 9476 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 7772928ee48f..deb6b489e4e5 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -144,3 +144,16 @@ Description:
 
 		Write a 1 to force the device to disconnect
 		(equivalent to unplugging a wired USB device).
+
+What:		/sys/bus/usb/drivers/.../remove_id
+Date:		November 2009
+Contact:	CHENG Renquan <rqcheng@smu.edu.sg>
+Description:
+		Writing a device ID to this file will remove an ID
+		that was dynamically added via the new_id sysfs entry.
+		The format for the device ID is:
+		idVendor idProduct. After successfully
+		removing an ID, the driver will no longer support the
+		device. This is useful to ensure auto probing won't
+		match the driver to the device. For example:
+		# echo "046d c315" > /sys/bus/usb/drivers/foo/remove_id
diff --git a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
index 4e8106f7cfd9..25b1e751b777 100644
--- a/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
+++ b/Documentation/ABI/testing/sysfs-class-uwb_rc-wusbhc
@@ -23,3 +23,16 @@ Description:
 		Since this relates to security (specifically, the
 		lifetime of PTKs and GTKs) it should not be changed
 		from the default.
+
+What:		/sys/class/uwb_rc/uwbN/wusbhc/wusb_phy_rate
+Date:		August 2009
+KernelVersion:	2.6.32
+Contact:	David Vrabel <david.vrabel@csr.com>
+Description:
+		The maximum PHY rate to use for all connected devices.
+		This is only of limited use for testing and
+		development as the hardware's automatic rate
+		adaptation is better than this simple control.
+
+		Refer to [ECMA-368] section 10.3.1.1 for the value to
+		use.
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
new file mode 100644
index 000000000000..0af0e9eed5d6
--- /dev/null
+++ b/Documentation/arm/OMAP/DSS
@@ -0,0 +1,317 @@
+OMAP2/3 Display Subsystem
+-------------------------
+
+This is an almost total rewrite of the OMAP FB driver in drivers/video/omap
+(let's call it DSS1). The main differences between DSS1 and DSS2 are DSI,
+TV-out and multiple display support, but there are lots of small improvements
+also.
+
+The DSS2 driver (omapdss module) is in arch/arm/plat-omap/dss/, and the FB,
+panel and controller drivers are in drivers/video/omap2/. DSS1 and DSS2
+currently live side by side; you can choose which one to use.
+
+Features
+--------
+
+Working and tested features include:
+
+- MIPI DPI (parallel) output
+- MIPI DSI output in command mode
+- MIPI DBI (RFBI) output
+- SDI output
+- TV output
+- All pieces can be compiled as a module or inside the kernel
+- Use DISPC to update any of the outputs
+- Use CPU to update RFBI or DSI output
+- OMAP DISPC planes
+- RGB16, RGB24 packed, RGB24 unpacked
+- YUV2, UYVY
+- Scaling
+- Adjusting DSS FCK to find a good pixel clock
+- Use DSI DPLL to create DSS FCK
+
+Tested boards include:
+- OMAP3 SDP board
+- Beagle board
+- N810
+
+omapdss driver
+--------------
+
+The DSS driver does not itself have any support for Linux framebuffer, V4L or
+such like the current ones, but it has an internal kernel API that upper level
+drivers can use.
+
+The DSS driver models OMAP's overlays, overlay managers and displays in a
+flexible way to enable non-common multi-display configuration. In addition to
+modelling the hardware overlays, omapdss supports virtual overlays and overlay
+managers. These can be used when updating a display with CPU or system DMA.
+
+Panel and controller drivers
+----------------------------
+
+The drivers implement panel or controller specific functionality and are not
+usually visible to users except through the omapfb driver. They register
+themselves to the DSS driver.
+
+omapfb driver
+-------------
+
+The omapfb driver implements an arbitrary number of standard Linux framebuffers.
+These framebuffers can be routed flexibly to any overlays, thus allowing a very
+dynamic display architecture.
+
+The driver exports some omapfb specific ioctls, which are compatible with the
+ioctls in the old driver.
+
+The rest of the non-standard features are exported via sysfs. Whether the final
+implementation will use sysfs, or ioctls, is still open.
+
+V4L2 drivers
+------------
+
+V4L2 is being implemented in TI.
+
+From the omapdss point of view the V4L2 drivers should be similar to the
+framebuffer driver.
+
+Architecture
+--------------------
+
+Some clarification of what the different components do:
+
+    - Framebuffer is a memory area inside OMAP's SRAM/SDRAM that contains the
+      pixel data for the image. Framebuffer has width and height and color
+      depth.
+    - Overlay defines where the pixels are read from and where they go on the
+      screen. The overlay may be smaller than the framebuffer, thus displaying
+      only part of the framebuffer. The position of the overlay may be changed
+      if the overlay is smaller than the display.
+    - Overlay manager combines the overlays into one image and feeds them to
+      the display.
+    - Display is the actual physical display device.
+
+A framebuffer can be connected to multiple overlays to show the same pixel data
+on all of the overlays. Note that in this case the overlay input sizes must be
+the same, but, in case of video overlays, the output size can be different. Any
+framebuffer can be connected to any overlay.
+
+An overlay can be connected to one overlay manager. Also, DISPC overlays can be
+connected only to DISPC overlay managers, and virtual overlays can only be
+connected to virtual overlay managers.
+
+An overlay manager can be connected to one display. There are certain
+restrictions on which kinds of displays an overlay manager can be connected to:
+
+    - DISPC TV overlay manager can be connected only to the TV display.
+    - Virtual overlay managers can only be connected to DBI or DSI displays.
+    - DISPC LCD overlay manager can be connected to all displays, except the TV
+      display.
+
+Sysfs
+-----
+The sysfs interface is mainly used for testing. I don't think the sysfs
+interface is the best for this in the final version, but I don't quite know
+what would be the best interfaces for these things.
+
+The sysfs interface is divided into two parts: DSS and FB.
+
+/sys/class/graphics/fb? directory:
+mirror		0=off, 1=on
+rotate		Rotation 0-3 for 0, 90, 180, 270 degrees
+rotate_type	0 = DMA rotation, 1 = VRFB rotation
+overlays	List of overlay numbers to which framebuffer pixels go
+phys_addr	Physical address of the framebuffer
+virt_addr	Virtual address of the framebuffer
+size		Size of the framebuffer
+
+/sys/devices/platform/omapdss/overlay? directory:
+enabled		0=off, 1=on
+input_size	width,height (ie. the framebuffer size)
+manager		Destination overlay manager name
+name
+output_size	width,height
+position	x,y
+screen_width	width
+global_alpha	global alpha 0-255 0=transparent 255=opaque
+
+/sys/devices/platform/omapdss/manager? directory:
+display			Destination display
+name
+alpha_blending_enabled	0=off, 1=on
+trans_key_enabled	0=off, 1=on
+trans_key_type		gfx-destination, video-source
+trans_key_value		transparency color key (RGB24)
+default_color		default background color (RGB24)
+
+/sys/devices/platform/omapdss/display? directory:
+ctrl_name	Controller name
+mirror		0=off, 1=on
+update_mode	0=off, 1=auto, 2=manual
+enabled		0=off, 1=on
+name
+rotate		Rotation 0-3 for 0, 90, 180, 270 degrees
+timings		Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
+		When writing, two special timings are accepted for tv-out:
+		"pal" and "ntsc"
+panel_name
+tear_elim	Tearing elimination 0=off, 1=on
+
+There are also some debugfs files at <debugfs>/omapdss/ which show information
+about clocks and registers.
+
+Examples
+--------
+
+The following definitions have been made for the examples below:
+
+ovl0=/sys/devices/platform/omapdss/overlay0
+ovl1=/sys/devices/platform/omapdss/overlay1
+ovl2=/sys/devices/platform/omapdss/overlay2
+
+mgr0=/sys/devices/platform/omapdss/manager0
+mgr1=/sys/devices/platform/omapdss/manager1
+
+lcd=/sys/devices/platform/omapdss/display0
+dvi=/sys/devices/platform/omapdss/display1
+tv=/sys/devices/platform/omapdss/display2
+
+fb0=/sys/class/graphics/fb0
+fb1=/sys/class/graphics/fb1
+fb2=/sys/class/graphics/fb2
+
+Default setup on OMAP3 SDP
+--------------------------
+
+Here's the default setup on the OMAP3 SDP board. All planes go to LCD. DVI
+and TV-out are not in use. The columns from left to right are:
+framebuffers, overlays, overlay managers, displays. Framebuffers are
+handled by omapfb, and the rest by the DSS.
+
+FB0 --- GFX  -\            DVI
+FB1 --- VID1 --+- LCD ---- LCD
+FB2 --- VID2 -/            TV ----- TV
+
+Example: Switch from LCD to DVI
+----------------------
+
+w=`cat $dvi/timings | cut -d "," -f 2 | cut -d "/" -f 1`
+h=`cat $dvi/timings | cut -d "," -f 3 | cut -d "/" -f 1`
+
+echo "0" > $lcd/enabled
+echo "" > $mgr0/display
+fbset -fb /dev/fb0 -xres $w -yres $h -vxres $w -vyres $h
+# at this point you have to switch the dvi/lcd dip-switch from the omap board
+echo "dvi" > $mgr0/display
+echo "1" > $dvi/enabled
+
+After this the configuration looks like:
+
+FB0 --- GFX  -\         -- DVI
+FB1 --- VID1 --+- LCD -/   LCD
+FB2 --- VID2 -/            TV ----- TV
+
+Example: Clone GFX overlay to LCD and TV
+-------------------------------
+
+w=`cat $tv/timings | cut -d "," -f 2 | cut -d "/" -f 1`
+h=`cat $tv/timings | cut -d "," -f 3 | cut -d "/" -f 1`
+
+echo "0" > $ovl0/enabled
+echo "0" > $ovl1/enabled
+
+echo "" > $fb1/overlays
+echo "0,1" > $fb0/overlays
+
+echo "$w,$h" > $ovl1/output_size
+echo "tv" > $ovl1/manager
+
+echo "1" > $ovl0/enabled
+echo "1" > $ovl1/enabled
+
+echo "1" > $tv/enabled
+
+After this the configuration looks like (only relevant parts shown):
+
+FB0 +-- GFX ---- LCD ---- LCD
+     \- VID1 ---- TV ---- TV
+
+Misc notes
+----------
+
+OMAP FB allocates the framebuffer memory using the OMAP VRAM allocator.
+
+Using DSI DPLL to generate the pixel clock it is possible to produce a pixel
+clock of 86.5MHz (max possible), and with that you get 1280x1024@57 output from DVI.
+
+Rotation and mirroring currently only support RGB565 and RGB8888 modes. VRFB
+does not support mirroring.
+
+VRFB rotation requires much more memory than a non-rotated framebuffer, so you
+probably need to increase your vram setting before using VRFB rotation. Also,
+many applications may not work with VRFB if they do not pay attention to all
+framebuffer parameters.
+
+Kernel boot arguments
+---------------------
+
+vram=<size>
+	- Amount of total VRAM to preallocate. For example, "10M". omapfb
+	  allocates memory for framebuffers from VRAM.
+
+omapfb.mode=<display>:<mode>[,...]
+	- Default video mode for specified displays. For example,
+	  "dvi:800x400MR-24@60". See drivers/video/modedb.c.
+	  There are also two special modes: "pal" and "ntsc" that
+	  can be used for tv out.
+
+omapfb.vram=<fbnum>:<size>[@<physaddr>][,...]
+	- VRAM allocated for a framebuffer. Normally omapfb allocates vram
+	  depending on the display size. With this you can manually allocate
+	  more or define the physical address of each framebuffer. For example,
+	  "1:4M" to allocate 4M for fb1.
+
+omapfb.debug=<y|n>
+	- Enable debug printing. You have to have OMAPFB debug support enabled
+	  in kernel config.
+
+omapfb.test=<y|n>
+	- Draw test pattern to framebuffer whenever framebuffer settings change.
+	  You need to have OMAPFB debug support enabled in kernel config.
+
+omapfb.vrfb=<y|n>
+	- Use VRFB rotation for all framebuffers.
+
+omapfb.rotate=<angle>
+	- Default rotation applied to all framebuffers.
+	  0 - 0 degree rotation
+	  1 - 90 degree rotation
+	  2 - 180 degree rotation
+	  3 - 270 degree rotation
+
+omapfb.mirror=<y|n>
+	- Default mirror for all framebuffers. Only works with DMA rotation.
+
+omapdss.def_disp=<display>
+	- Name of the default display, to which all overlays will be connected.
+	  Common examples are "lcd" or "tv".
+
+omapdss.debug=<y|n>
+	- Enable debug printing. You have to have DSS debug support enabled in
+	  kernel config.
+
+TODO
+----
+
+DSS locking
+
+Error checking
+- Lots of checks are missing or implemented just as BUG()
+
+System DMA update for DSI
+- Can be used for RGB16 and RGB24P modes. Probably not for RGB24U (how
+  to skip the empty byte?)
+
+OMAP1 support
+- Not sure if needed
+
diff --git a/Documentation/filesystems/nilfs2.txt b/Documentation/filesystems/nilfs2.txt
index 01539f410676..4949fcaa6b6a 100644
--- a/Documentation/filesystems/nilfs2.txt
+++ b/Documentation/filesystems/nilfs2.txt
@@ -49,8 +49,7 @@ Mount options
 NILFS2 supports the following mount options:
 (*) == default
 
-barrier=on(*)		This enables/disables barriers. barrier=off disables
-			it, barrier=on enables it.
+nobarrier		Disables barriers.
 errors=continue(*)	Keep going on a filesystem error.
 errors=remount-ro	Remount the filesystem read-only on an error.
 errors=panic		Panic and halt the machine if an error occurs.
@@ -71,6 +70,10 @@ order=strict		Apply strict in-order semantics that preserves sequence
 			blocks. That means, it is guaranteed that no
 			overtaking of events occurs in the recovered file
 			system after a crash.
+norecovery		Disable recovery of the filesystem on mount.
+			This disables every write access on the device for
+			read-only mounts or snapshots. This option will fail
+			for r/w mounts on an unclean volume.
 
 NILFS2 usage
 ============
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 777dc8a32df8..3f886e298f62 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2663,6 +2663,8 @@ and is between 256 and 4096 characters. It is defined in the file
 			to a common usb-storage quirk flag as follows:
 			a = SANE_SENSE (collect more than 18 bytes
 				of sense data);
+			b = BAD_SENSE (don't collect more than 18
+				bytes of sense data);
 			c = FIX_CAPACITY (decrease the reported
 				device capacity by one sector);
 			h = CAPACITY_HEURISTICS (decrease the
diff --git a/Documentation/powerpc/dts-bindings/xilinx.txt b/Documentation/powerpc/dts-bindings/xilinx.txt
index 80339fe4300b..ea68046bb9cb 100644
--- a/Documentation/powerpc/dts-bindings/xilinx.txt
+++ b/Documentation/powerpc/dts-bindings/xilinx.txt
@@ -292,4 +292,15 @@
 	- reg-offset : A value of 3 is required
 	- reg-shift : A value of 2 is required
 
+      vii) Xilinx USB Host controller
+
+      The Xilinx USB host controller is EHCI compatible but with a different
+      base address for the EHCI registers, and it is always a big-endian
+      USB Host controller. The hardware can be configured as high speed only,
+      or high speed/full speed hybrid.
+
+      Required properties:
+      - xlnx,support-usb-fs: A value 0 means the core is built as high speed
+                             only. A value 1 means the core also supports
+                             full speed devices.
+
 
diff --git a/Documentation/usb/power-management.txt b/Documentation/usb/power-management.txt
index ad642615ad4c..c7c1dc2f8017 100644
--- a/Documentation/usb/power-management.txt
+++ b/Documentation/usb/power-management.txt
@@ -2,7 +2,7 @@
 
 		Alan Stern <stern@rowland.harvard.edu>
 
-			October 5, 2007
+			November 10, 2009
 
 
 
@@ -123,9 +123,9 @@ relevant attribute files are: wakeup, level, and autosuspend.
 
 	power/level
 
-		This file contains one of three words: "on", "auto",
-		or "suspend". You can write those words to the file
-		to change the device's setting.
+		This file contains one of two words: "on" or "auto".
+		You can write those words to the file to change the
+		device's setting.
 
 		"on" means that the device should be resumed and
 		autosuspend is not allowed. (Of course, system
@@ -134,10 +134,10 @@ relevant attribute files are: wakeup, level, and autosuspend.
 		"auto" is the normal state in which the kernel is
 		allowed to autosuspend and autoresume the device.
 
-		"suspend" means that the device should remain
-		suspended, and autoresume is not allowed. (But remote
-		wakeup may still be allowed, since it is controlled
-		separately by the power/wakeup attribute.)
+		(In kernels up to 2.6.32, you could also specify
+		"suspend", meaning that the device should remain
+		suspended and autoresume was not allowed. This
+		setting is no longer supported.)
 
 	power/autosuspend
 
@@ -313,13 +313,14 @@ three of the methods listed above. In addition, a driver indicates
 that it supports autosuspend by setting the .supports_autosuspend flag
 in its usb_driver structure. It is then responsible for informing the
 USB core whenever one of its interfaces becomes busy or idle. The
-driver does so by calling these five functions:
+driver does so by calling these six functions:
 
 	int usb_autopm_get_interface(struct usb_interface *intf);
 	void usb_autopm_put_interface(struct usb_interface *intf);
-	int usb_autopm_set_interface(struct usb_interface *intf);
 	int usb_autopm_get_interface_async(struct usb_interface *intf);
 	void usb_autopm_put_interface_async(struct usb_interface *intf);
+	void usb_autopm_get_interface_no_resume(struct usb_interface *intf);
+	void usb_autopm_put_interface_no_suspend(struct usb_interface *intf);
 
 The functions work by maintaining a counter in the usb_interface
 structure. When intf->pm_usage_count is > 0 then the interface is
@@ -331,11 +332,13 @@ considered to be idle, and the kernel may autosuspend the device.
 associated with the device itself rather than any of its interfaces.
 This field is used only by the USB core.)
 
-The driver owns intf->pm_usage_count; it can modify the value however
-and whenever it likes. A nice aspect of the non-async usb_autopm_*
-routines is that the changes they make are protected by the usb_device
-structure's PM mutex (udev->pm_mutex); however drivers may change
-pm_usage_count without holding the mutex. Drivers using the async
+Drivers must not modify intf->pm_usage_count directly; its value
+should be changed only by using the functions listed above. Drivers
+are responsible for ensuring that the overall change to pm_usage_count
+during their lifetime balances out to 0 (it may be necessary for the
+disconnect method to call usb_autopm_put_interface() one or more times
+to fulfill this requirement). The first two routines use the PM mutex
+in struct usb_device for mutual exclusion; drivers using the async
 routines are responsible for their own synchronization and mutual
 exclusion.
 
@@ -347,11 +350,6 @@ exclusion.
 	attempts an autosuspend if the new value is <= 0 and the
 	device isn't suspended.
 
-	usb_autopm_set_interface() leaves pm_usage_count alone.
-	It attempts an autoresume if the value is > 0 and the device
-	is suspended, and it attempts an autosuspend if the value is
-	<= 0 and the device isn't suspended.
-
 	usb_autopm_get_interface_async() and
 	usb_autopm_put_interface_async() do almost the same things as
 	their non-async counterparts. The differences are: they do
@@ -360,13 +358,11 @@ exclusion.
 	such as an URB's completion handler, but when they return the
 	device will generally not yet be in the desired state.
 
-There also are a couple of utility routines drivers can use:
-
-	usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
-	usb_autopm_set_interface(), which will attempt an autosuspend.
-
-	usb_autopm_disable() sets pm_usage_cnt to 1 and then calls
-	usb_autopm_set_interface(), which will attempt an autoresume.
+	usb_autopm_get_interface_no_resume() and
+	usb_autopm_put_interface_no_suspend() merely increment or
+	decrement the pm_usage_count value; they do not attempt to
+	carry out an autoresume or an autosuspend. Hence they can be
+	called in an atomic context.
 
 The conventional usage pattern is that a driver calls
 usb_autopm_get_interface() in its open routine and
@@ -400,11 +396,11 @@ though, setting this flag won't cause the kernel to autoresume it.
 Normally a driver would set this flag in its probe method, at which
 time the device is guaranteed not to be autosuspended.)
 
-The usb_autopm_* routines have to run in a sleepable process context;
-they must not be called from an interrupt handler or while holding a
-spinlock. In fact, the entire autosuspend mechanism is not well geared
-toward interrupt-driven operation. However there is one thing a
-driver can do in an interrupt handler:
+The synchronous usb_autopm_* routines have to run in a sleepable
+process context; they must not be called from an interrupt handler or
+while holding a spinlock. In fact, the entire autosuspend mechanism
+is not well geared toward interrupt-driven operation. However there
+is one thing a driver can do in an interrupt handler:
 
 	usb_mark_last_busy(struct usb_device *udev);
 
@@ -423,15 +419,16 @@ an URB had completed too recently.
 
 External suspend calls should never be allowed to fail in this way,
 only autosuspend calls. The driver can tell them apart by checking
-udev->auto_pm; this flag will be set to 1 for internal PM events
-(autosuspend or autoresume) and 0 for external PM events.
+the PM_EVENT_AUTO bit in the message.event argument to the suspend
+method; this bit will be set for internal PM events (autosuspend) and
+clear for external PM events.
 
 Many of the ingredients in the autosuspend framework are oriented
 towards interfaces: The usb_interface structure contains the
 pm_usage_cnt field, and the usb_autopm_* routines take an interface
 pointer as their argument. But somewhat confusingly, a few of the
-pieces (usb_mark_last_busy() and udev->auto_pm) use the usb_device
-structure instead. Drivers need to keep this straight; they can call
+pieces (i.e., usb_mark_last_busy()) use the usb_device structure
+instead. Drivers need to keep this straight; they can call
 interface_to_usbdev() to find the device structure for a given
 interface.
 
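A minimal sketch of the conventional pattern described in the documentation above (get in open, put in release, with the overall count balancing out to 0); the skel_* names and the driver-private structure are hypothetical and not part of this patch:

#include <linux/usb.h>

/* Hypothetical driver-private state, for illustration only. */
struct skel_dev {
	struct usb_interface *intf;
};

static int skel_open(struct skel_dev *dev)
{
	int retval;

	/* Resume the device if needed and block autosuspend; on
	 * success the interface's pm_usage_count is incremented. */
	retval = usb_autopm_get_interface(dev->intf);
	if (retval)
		return retval;	/* autoresume failed; don't touch the hardware */

	return 0;		/* I/O is safe until the matching put */
}

static void skel_release(struct skel_dev *dev)
{
	/* Balance the get in skel_open(); the core may autosuspend now. */
	usb_autopm_put_interface(dev->intf);
}
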
diff --git a/MAINTAINERS b/MAINTAINERS
index d7f8668b7a72..520a3b3fd82c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3903,6 +3903,23 @@ L: linux-omap@vger.kernel.org
 S:	Maintained
 F:	drivers/video/omap/
 
+OMAP DISPLAY SUBSYSTEM SUPPORT (DSS2)
+M:	Tomi Valkeinen <tomi.valkeinen@nokia.com>
+L:	linux-omap@vger.kernel.org
+L:	linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/video/omap2/dss/
+F:	drivers/video/omap2/vrfb.c
+F:	drivers/video/omap2/vram.c
+F:	Documentation/arm/OMAP/DSS
+
+OMAP FRAMEBUFFER SUPPORT (FOR DSS2)
+M:	Tomi Valkeinen <tomi.valkeinen@nokia.com>
+L:	linux-omap@vger.kernel.org
+L:	linux-fbdev@vger.kernel.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/video/omap2/omapfb/
+
 OMAP MMC SUPPORT
 M:	Jarkko Lavinen <jarkko.lavinen@nokia.com>
 L:	linux-omap@vger.kernel.org
@@ -5663,9 +5680,11 @@ S:	Maintained
 F:	drivers/net/wireless/rndis_wlan.c
 
 USB XHCI DRIVER
-M:	Sarah Sharp <sarah.a.sharp@intel.com>
+M:	Sarah Sharp <sarah.a.sharp@linux.intel.com>
 L:	linux-usb@vger.kernel.org
 S:	Supported
+F:	drivers/usb/host/xhci*
+F:	drivers/usb/host/pci-quirks*
 
 USB ZC0301 DRIVER
 M:	Luca Risolia <luca.risolia@studio.unibo.it>
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 9a3334ae282e..62619f25132f 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -178,25 +178,18 @@ SYSCALL_DEFINE6(osf_mmap, unsigned long, addr, unsigned long, len,
 		unsigned long, prot, unsigned long, flags, unsigned long, fd,
 		unsigned long, off)
 {
-	struct file *file = NULL;
-	unsigned long ret = -EBADF;
+	unsigned long ret = -EINVAL;
 
 #if 0
 	if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
 		printk("%s: unimplemented OSF mmap flags %04lx\n",
 			current->comm, flags);
 #endif
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	down_write(&current->mm->mmap_sem);
-	ret = do_mmap(file, addr, len, prot, flags, off);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
+	if ((off + PAGE_ALIGN(len)) < off)
+		goto out;
+	if (off & ~PAGE_MASK)
+		goto out;
+	ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
  out:
 	return ret;
 }
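The (off + PAGE_ALIGN(len)) < off test above is a wraparound check: if the page-aligned length pushes the offset past the top of the address space, the unsigned sum wraps below the original offset. A small standalone illustration (userspace C; PAGE_SIZE is assumed to be 4096 for the example):

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumption for illustration */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long off = -2 * PAGE_SIZE;	/* two pages below the top of the address space */
	unsigned long len = 3 * PAGE_SIZE;	/* aligned length pushes the sum past zero */

	/* Prints 1: the sum wrapped, so the kernel check above
	 * would reject this request with -EINVAL. */
	printf("%d\n", (off + PAGE_ALIGN(len)) < off);
	return 0;
}
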
diff --git a/arch/arm/configs/omap_3430sdp_defconfig b/arch/arm/configs/omap_3430sdp_defconfig
index 84829587d55a..592457cfbbe5 100644
--- a/arch/arm/configs/omap_3430sdp_defconfig
+++ b/arch/arm/configs/omap_3430sdp_defconfig
@@ -963,10 +963,32 @@ CONFIG_FB_CFB_IMAGEBLIT=y
 #
 # CONFIG_FB_S1D13XXX is not set
 # CONFIG_FB_VIRTUAL is not set
-CONFIG_FB_OMAP=y
-# CONFIG_FB_OMAP_LCDC_EXTERNAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_OMAP_LCD_VGA is not set
 # CONFIG_FB_OMAP_BOOTLOADER_INIT is not set
-CONFIG_FB_OMAP_CONSISTENT_DMA_SIZE=2
+CONFIG_OMAP2_VRAM=y
+CONFIG_OMAP2_VRFB=y
+CONFIG_OMAP2_DSS=y
+CONFIG_OMAP2_VRAM_SIZE=4
+CONFIG_OMAP2_DSS_DEBUG_SUPPORT=y
+# CONFIG_OMAP2_DSS_RFBI is not set
+CONFIG_OMAP2_DSS_VENC=y
+# CONFIG_OMAP2_DSS_SDI is not set
+# CONFIG_OMAP2_DSS_DSI is not set
+# CONFIG_OMAP2_DSS_FAKE_VSYNC is not set
+CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK=0
+CONFIG_FB_OMAP2=y
+CONFIG_FB_OMAP2_DEBUG_SUPPORT=y
+# CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE is not set
+CONFIG_FB_OMAP2_NUM_FBS=3
+
+#
+# OMAP2/3 Display Device Drivers
+#
+CONFIG_PANEL_GENERIC=y
+CONFIG_PANEL_SHARP_LS037V7DW01=y
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
 #
diff --git a/arch/arm/include/asm/mman.h b/arch/arm/include/asm/mman.h
index 8eebf89f5ab1..41f99c573b93 100644
--- a/arch/arm/include/asm/mman.h
+++ b/arch/arm/include/asm/mman.h
@@ -1 +1,4 @@
 #include <asm-generic/mman.h>
+
+#define arch_mmap_check(addr, len, flags) \
+	(((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)
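The macro rejects MAP_FIXED requests below FIRST_USER_ADDRESS before the generic mmap path proceeds. A standalone illustration of the logic, with the kernel constants stubbed out (the values are assumptions for the example; on ARM, FIRST_USER_ADDRESS is PAGE_SIZE):

#include <stdio.h>

/* Stubbed constants, values assumed for illustration only. */
#define FIRST_USER_ADDRESS	4096UL
#define MAP_FIXED		0x10
#define EINVAL			22

#define arch_mmap_check(addr, len, flags) \
	(((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? -EINVAL : 0)

int main(void)
{
	/* A fixed mapping at the NULL page is refused... */
	printf("%d\n", arch_mmap_check(0UL, 4096UL, MAP_FIXED));	/* -22 */
	/* ...a fixed mapping above the first page is allowed... */
	printf("%d\n", arch_mmap_check(65536UL, 4096UL, MAP_FIXED));	/* 0 */
	/* ...and non-MAP_FIXED hints are never rejected here. */
	printf("%d\n", arch_mmap_check(0UL, 4096UL, 0));		/* 0 */
	return 0;
}
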
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index f58c1156e779..9314a2d681f1 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -172,7 +172,7 @@
 /* 160 */	CALL(sys_sched_get_priority_min)
 		CALL(sys_sched_rr_get_interval)
 		CALL(sys_nanosleep)
-		CALL(sys_arm_mremap)
+		CALL(sys_mremap)
 		CALL(sys_setresuid16)
 /* 165 */	CALL(sys_getresuid16)
 		CALL(sys_ni_syscall)		/* vm86 */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index f0fe95b7085d..2c1db77d7848 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -416,12 +416,12 @@ sys_mmap2:
 		tst	r5, #PGOFF_MASK
 		moveq	r5, r5, lsr #PAGE_SHIFT - 12
 		streq	r5, [sp, #4]
-		beq	do_mmap2
+		beq	sys_mmap_pgoff
 		mov	r0, #-EINVAL
 		mov	pc, lr
 #else
 		str	r5, [sp, #4]
-		b	do_mmap2
+		b	sys_mmap_pgoff
 #endif
 ENDPROC(sys_mmap2)
 
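sys_mmap2 receives its file offset in 4096-byte units regardless of the kernel page size; when PAGE_SHIFT > 12 the stub above rejects offsets that are not whole pages and shifts the rest down to a page offset for sys_mmap_pgoff(). A C rendering of the same arithmetic (the PAGE_SHIFT value and the PGOFF_MASK definition are assumptions for the example):

#include <stdio.h>

#define PAGE_SHIFT	13UL	/* assume 8 KiB pages so the shift is visible */
#define PGOFF_MASK	((1UL << (PAGE_SHIFT - 12)) - 1)

/* Returns the page offset, or -1 for a misaligned 4 KiB-unit offset. */
static long mmap2_pgoff(unsigned long off_4k)
{
	if (off_4k & PGOFF_MASK)		/* tst r5, #PGOFF_MASK */
		return -1;			/* mov r0, #-EINVAL */
	return off_4k >> (PAGE_SHIFT - 12);	/* moveq r5, r5, lsr ... */
}

int main(void)
{
	printf("%ld\n", mmap2_pgoff(6));	/* 3: six 4 KiB units = three 8 KiB pages */
	printf("%ld\n", mmap2_pgoff(5));	/* -1: not a whole page */
	return 0;
}
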
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 78ecaac65206..ae4027bd01bd 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -28,41 +28,6 @@
 #include <linux/ipc.h>
 #include <linux/uaccess.h>
 
-extern unsigned long do_mremap(unsigned long addr, unsigned long old_len,
-			       unsigned long new_len, unsigned long flags,
-			       unsigned long new_addr);
-
-/* common code for old and new mmaps */
-inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EINVAL;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	if (flags & MAP_FIXED && addr < FIRST_USER_ADDRESS)
-		goto out;
-
-	error = -EBADF;
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 struct mmap_arg_struct {
 	unsigned long addr;
 	unsigned long len;
@@ -84,29 +49,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
 
-asmlinkage unsigned long
-sys_arm_mremap(unsigned long addr, unsigned long old_len,
-	unsigned long new_len, unsigned long flags,
-	unsigned long new_addr)
-{
-	unsigned long ret = -EINVAL;
-
-	if (flags & MREMAP_FIXED && new_addr < FIRST_USER_ADDRESS)
-		goto out;
-
-	down_write(&current->mm->mmap_sem);
-	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
-	up_write(&current->mm->mmap_sem);
-
-out:
-	return ret;
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls.
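All of the per-architecture do_mmap2()/sys_mmap2() bodies removed by this patch collapse into one generic syscall that is not itself visible in these hunks. The following is a sketch of sys_mmap_pgoff() as this series introduces it in mm/util.c (simplified; the MAP_HUGETLB handling that grew alongside it is omitted). Because do_mmap_pgoff() already honours arch_mmap_check(), the MAP_FIXED test deleted from the ARM wrapper above survives via the mman.h macro:

	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);
			if (!file)
				goto out;
		}

		/* the flag clearing every arch wrapper used to do by hand */
		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}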
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 5a275bab2dfe..71e1a3fad0ea 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -14,6 +14,7 @@
 #include <linux/platform_device.h>
 #include <linux/input.h>
 #include <linux/clk.h>
+#include <linux/omapfb.h>
 
 #include <linux/spi/spi.h>
 #include <linux/spi/ads7846.h>
@@ -32,7 +33,6 @@
 #include <plat/keypad.h>
 #include <plat/common.h>
 #include <plat/dsp_common.h>
-#include <plat/omapfb.h>
 #include <plat/hwa742.h>
 #include <plat/lcd_mipid.h>
 #include <plat/mmc.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 491364e44c7d..5bda9fdbee9e 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -37,6 +37,7 @@
 #include <plat/common.h>
 #include <plat/dma.h>
 #include <plat/gpmc.h>
+#include <plat/display.h>
 
 #include <plat/control.h>
 #include <plat/gpmc-smc91x.h>
@@ -152,31 +153,152 @@ static struct spi_board_info sdp3430_spi_board_info[] __initdata = {
 	},
 };
 
-static struct platform_device sdp3430_lcd_device = {
-	.name		= "sdp2430_lcd",
-	.id		= -1,
+
+#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO	8
+#define SDP3430_LCD_PANEL_ENABLE_GPIO		5
+
+static unsigned backlight_gpio;
+static unsigned enable_gpio;
+static int lcd_enabled;
+static int dvi_enabled;
+
+static void __init sdp3430_display_init(void)
+{
+	int r;
+
+	enable_gpio    = SDP3430_LCD_PANEL_ENABLE_GPIO;
+	backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO;
+
+	r = gpio_request(enable_gpio, "LCD reset");
+	if (r) {
+		printk(KERN_ERR "failed to get LCD reset GPIO\n");
+		goto err0;
+	}
+
+	r = gpio_request(backlight_gpio, "LCD Backlight");
+	if (r) {
+		printk(KERN_ERR "failed to get LCD backlight GPIO\n");
+		goto err1;
+	}
+
+	gpio_direction_output(enable_gpio, 0);
+	gpio_direction_output(backlight_gpio, 0);
+
+	return;
+err1:
+	gpio_free(enable_gpio);
+err0:
+	return;
+}
+
+static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev)
+{
+	if (dvi_enabled) {
+		printk(KERN_ERR "cannot enable LCD, DVI is enabled\n");
+		return -EINVAL;
+	}
+
+	gpio_direction_output(enable_gpio, 1);
+	gpio_direction_output(backlight_gpio, 1);
+
+	lcd_enabled = 1;
+
+	return 0;
+}
+
+static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev)
+{
+	lcd_enabled = 0;
+
+	gpio_direction_output(enable_gpio, 0);
+	gpio_direction_output(backlight_gpio, 0);
+}
+
+static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev)
+{
+	if (lcd_enabled) {
+		printk(KERN_ERR "cannot enable DVI, LCD is enabled\n");
+		return -EINVAL;
+	}
+
+	dvi_enabled = 1;
+
+	return 0;
+}
+
+static void sdp3430_panel_disable_dvi(struct omap_dss_device *dssdev)
+{
+	dvi_enabled = 0;
+}
+
+static int sdp3430_panel_enable_tv(struct omap_dss_device *dssdev)
+{
+	return 0;
+}
+
+static void sdp3430_panel_disable_tv(struct omap_dss_device *dssdev)
+{
+}
+
+
+static struct omap_dss_device sdp3430_lcd_device = {
+	.name			= "lcd",
+	.driver_name		= "sharp_ls_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 16,
+	.platform_enable	= sdp3430_panel_enable_lcd,
+	.platform_disable	= sdp3430_panel_disable_lcd,
 };
 
-static struct regulator_consumer_supply sdp3430_vdac_supply = {
-	.supply		= "vdac",
-	.dev		= &sdp3430_lcd_device.dev,
+static struct omap_dss_device sdp3430_dvi_device = {
+	.name			= "dvi",
+	.driver_name		= "generic_panel",
+	.type			= OMAP_DISPLAY_TYPE_DPI,
+	.phy.dpi.data_lines	= 24,
+	.platform_enable	= sdp3430_panel_enable_dvi,
+	.platform_disable	= sdp3430_panel_disable_dvi,
 };
 
-static struct regulator_consumer_supply sdp3430_vdvi_supply = {
-	.supply		= "vdvi",
-	.dev		= &sdp3430_lcd_device.dev,
+static struct omap_dss_device sdp3430_tv_device = {
	.name			= "tv",
+	.driver_name		= "venc",
+	.type			= OMAP_DISPLAY_TYPE_VENC,
+	.phy.venc.type		= OMAP_DSS_VENC_TYPE_SVIDEO,
+	.platform_enable	= sdp3430_panel_enable_tv,
+	.platform_disable	= sdp3430_panel_disable_tv,
 };
 
-static struct platform_device *sdp3430_devices[] __initdata = {
+
+static struct omap_dss_device *sdp3430_dss_devices[] = {
 	&sdp3430_lcd_device,
+	&sdp3430_dvi_device,
+	&sdp3430_tv_device,
 };
 
-static struct omap_lcd_config sdp3430_lcd_config __initdata = {
-	.ctrl_name	= "internal",
+static struct omap_dss_board_info sdp3430_dss_data = {
+	.num_devices	= ARRAY_SIZE(sdp3430_dss_devices),
+	.devices	= sdp3430_dss_devices,
+	.default_device	= &sdp3430_lcd_device,
+};
+
+static struct platform_device sdp3430_dss_device = {
+	.name		= "omapdss",
+	.id		= -1,
+	.dev		= {
+		.platform_data = &sdp3430_dss_data,
+	},
+};
+
+static struct regulator_consumer_supply sdp3430_vdda_dac_supply = {
+	.supply		= "vdda_dac",
+	.dev		= &sdp3430_dss_device.dev,
+};
+
+static struct platform_device *sdp3430_devices[] __initdata = {
+	&sdp3430_dss_device,
 };
 
 static struct omap_board_config_kernel sdp3430_config[] __initdata = {
-	{ OMAP_TAG_LCD,		&sdp3430_lcd_config },
 };
 
 static void __init omap_3430sdp_init_irq(void)
@@ -392,22 +514,34 @@ static struct regulator_init_data sdp3430_vdac = {
 					| REGULATOR_CHANGE_STATUS,
 	},
 	.num_consumer_supplies	= 1,
-	.consumer_supplies	= &sdp3430_vdac_supply,
+	.consumer_supplies	= &sdp3430_vdda_dac_supply,
 };
 
 /* VPLL2 for digital video outputs */
+static struct regulator_consumer_supply sdp3430_vpll2_supplies[] = {
+	{
+		.supply		= "vdvi",
+		.dev		= &sdp3430_lcd_device.dev,
+	},
+	{
+		.supply		= "vdds_dsi",
+		.dev		= &sdp3430_dss_device.dev,
+	}
+};
+
 static struct regulator_init_data sdp3430_vpll2 = {
 	.constraints = {
 		.name			= "VDVI",
 		.min_uV			= 1800000,
 		.max_uV			= 1800000,
+		.apply_uV		= true,
 		.valid_modes_mask	= REGULATOR_MODE_NORMAL
 					| REGULATOR_MODE_STANDBY,
 		.valid_ops_mask		= REGULATOR_CHANGE_MODE
 					| REGULATOR_CHANGE_STATUS,
 	},
-	.num_consumer_supplies	= 1,
-	.consumer_supplies	= &sdp3430_vdvi_supply,
+	.num_consumer_supplies	= ARRAY_SIZE(sdp3430_vpll2_supplies),
+	.consumer_supplies	= sdp3430_vpll2_supplies,
 };
 
 static struct twl4030_codec_audio_data sdp3430_audio = {
@@ -521,6 +655,7 @@ static void __init omap_3430sdp_init(void)
 	omap_serial_init();
 	usb_musb_init();
 	board_smc91x_init();
+	sdp3430_display_init();
 	enable_board_wakeup_source();
 	usb_ehci_init(&ehci_pdata);
 }
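The .driver_name strings above ("sharp_ls_panel", "generic_panel", "venc") are matched against panel drivers that register through the omap_dss_driver interface declared in plat/display.h further down in this patch. A hedged sketch of the driver side of that pairing (the probe body and the panel driver file are not part of this diff):

	static int sharp_ls_probe(struct omap_dss_device *dssdev)
	{
		/* panel-specific setup (timings, config flags) goes here */
		return 0;
	}

	static struct omap_dss_driver sharp_ls_driver = {
		.probe		= sharp_ls_probe,
		.driver		= {
			.name	= "sharp_ls_panel",	/* matches .driver_name */
			.owner	= THIS_MODULE,
		},
	};

	static int __init sharp_ls_init(void)
	{
		return omap_dss_register_driver(&sharp_ls_driver);
	}
	module_init(sharp_ls_init);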
diff --git a/arch/arm/mach-omap2/clock24xx.c b/arch/arm/mach-omap2/clock24xx.c
index e70e7e000eaa..845b478ebeee 100644
--- a/arch/arm/mach-omap2/clock24xx.c
+++ b/arch/arm/mach-omap2/clock24xx.c
@@ -116,10 +116,10 @@ static struct omap_clk omap24xx_clks[] = {
 	CLK(NULL,	"mdm_ick",	&mdm_ick,	CK_243X),
 	CLK(NULL,	"mdm_osc_ck",	&mdm_osc_ck,	CK_243X),
 	/* DSS domain clocks */
-	CLK("omapfb",	"ick",		&dss_ick,	CK_243X | CK_242X),
-	CLK("omapfb",	"dss1_fck",	&dss1_fck,	CK_243X | CK_242X),
-	CLK("omapfb",	"dss2_fck",	&dss2_fck,	CK_243X | CK_242X),
-	CLK("omapfb",	"tv_fck",	&dss_54m_fck,	CK_243X | CK_242X),
+	CLK("omapdss",	"ick",		&dss_ick,	CK_243X | CK_242X),
+	CLK("omapdss",	"dss1_fck",	&dss1_fck,	CK_243X | CK_242X),
+	CLK("omapdss",	"dss2_fck",	&dss2_fck,	CK_243X | CK_242X),
+	CLK("omapdss",	"tv_fck",	&dss_54m_fck,	CK_243X | CK_242X),
 	/* L3 domain clocks */
 	CLK(NULL,	"core_l3_ck",	&core_l3_ck,	CK_243X | CK_242X),
 	CLK(NULL,	"ssi_fck",	&ssi_ssr_sst_fck, CK_243X | CK_242X),
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 9f2feaf79865..ecbb5cd8eec8 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -236,13 +236,13 @@ static struct omap_clk omap34xx_clks[] = {
 	CLK("omap_rng",	"ick",		&rng_ick,	CK_343X),
 	CLK(NULL,	"sha11_ick",	&sha11_ick,	CK_343X),
 	CLK(NULL,	"des1_ick",	&des1_ick,	CK_343X),
-	CLK("omapfb",	"dss1_fck",	&dss1_alwon_fck_3430es1, CK_3430ES1),
-	CLK("omapfb",	"dss1_fck",	&dss1_alwon_fck_3430es2, CK_3430ES2),
-	CLK("omapfb",	"tv_fck",	&dss_tv_fck,	CK_343X),
-	CLK("omapfb",	"video_fck",	&dss_96m_fck,	CK_343X),
-	CLK("omapfb",	"dss2_fck",	&dss2_alwon_fck, CK_343X),
-	CLK("omapfb",	"ick",		&dss_ick_3430es1, CK_3430ES1),
-	CLK("omapfb",	"ick",		&dss_ick_3430es2, CK_3430ES2),
+	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es1, CK_3430ES1),
+	CLK("omapdss",	"dss1_fck",	&dss1_alwon_fck_3430es2, CK_3430ES2),
+	CLK("omapdss",	"tv_fck",	&dss_tv_fck,	CK_343X),
+	CLK("omapdss",	"video_fck",	&dss_96m_fck,	CK_343X),
+	CLK("omapdss",	"dss2_fck",	&dss2_alwon_fck, CK_343X),
+	CLK("omapdss",	"ick",		&dss_ick_3430es1, CK_3430ES1),
+	CLK("omapdss",	"ick",		&dss_ick_3430es2, CK_3430ES2),
 	CLK(NULL,	"cam_mclk",	&cam_mclk,	CK_343X),
 	CLK(NULL,	"cam_ick",	&cam_ick,	CK_343X),
 	CLK(NULL,	"csi2_96m_fck",	&csi2_96m_fck,	CK_343X),
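The first CLK() column is the device name the alias is looked up against, so these entries must follow the framebuffer-to-DSS rename: the old "omapfb" consumer gives way to the "omapdss" platform device registered in board-3430sdp.c above. Illustrative consumer-side lookup (a sketch, not a hunk from this patch):

	/* resolves through the "omapdss" entries in the tables above */
	struct clk *dss1_fck = clk_get(&sdp3430_dss_device.dev, "dss1_fck");

	if (IS_ERR(dss1_fck))
		return PTR_ERR(dss1_fck);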
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 59d28b2fd8c5..6a4d8e468703 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -22,17 +22,18 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/clk.h>
+#include <linux/omapfb.h>
 
 #include <asm/tlb.h>
 
 #include <asm/mach/map.h>
 
 #include <plat/mux.h>
-#include <plat/omapfb.h>
 #include <plat/sram.h>
 #include <plat/sdrc.h>
 #include <plat/gpmc.h>
 #include <plat/serial.h>
+#include <plat/vram.h>
 
 #ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once clkdev is ready */
 #include "clock.h"
@@ -264,6 +265,7 @@ void __init omap2_map_common_io(void)
 	omap2_check_revision();
 	omap_sram_init();
 	omapfb_reserve_sdram();
+	omap_vram_reserve_sdram();
 }
 
 /*
diff --git a/arch/arm/mach-omap2/sdrc.c b/arch/arm/mach-omap2/sdrc.c
index 9a592199321c..cbfbd142e946 100644
--- a/arch/arm/mach-omap2/sdrc.c
+++ b/arch/arm/mach-omap2/sdrc.c
@@ -160,3 +160,19 @@ void __init omap2_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
 	sdrc_write_reg(l, SDRC_POWER);
 	omap2_sms_save_context();
 }
+
+void omap2_sms_write_rot_control(u32 val, unsigned ctx)
+{
+	sms_write_reg(val, SMS_ROT_CONTROL(ctx));
+}
+
+void omap2_sms_write_rot_size(u32 val, unsigned ctx)
+{
+	sms_write_reg(val, SMS_ROT_SIZE(ctx));
+}
+
+void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx)
+{
+	sms_write_reg(val, SMS_ROT_PHYSICAL_BA(ctx));
+}
+
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 2b7996401b0f..f5abc51c5a07 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -54,7 +54,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	 * We enforce the MAP_FIXED case.
 	 */
 	if (flags & MAP_FIXED) {
-		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
 			return -EINVAL;
 		return addr;
 	}
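The old test demanded that the requested address itself be SHMLBA-aligned; the new one demands only that the address be correctly coloured relative to the file offset. Worked example, assuming SHMLBA = 4 * PAGE_SIZE = 0x4000 on this VIPT cache: a MAP_SHARED | MAP_FIXED request at addr = 0x40003000 with pgoff = 3 used to fail, since addr & (SHMLBA - 1) = 0x3000 is non-zero. Yet addr - (pgoff << PAGE_SHIFT) = 0x40000000 is SHMLBA-aligned, so page 0 of the object still lands on colour 0 and the mapping aliases safely with other correctly-coloured mappings of the same object; the new test therefore accepts it and rejects only genuinely mis-coloured requests.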
diff --git a/arch/arm/plat-mxc/Makefile b/arch/arm/plat-mxc/Makefile
index 4cbca9da1505..996cbac6932c 100644
--- a/arch/arm/plat-mxc/Makefile
+++ b/arch/arm/plat-mxc/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_ARCH_MX1) += iomux-mx1-mx2.o dma-mx1-mx2.o
 obj-$(CONFIG_ARCH_MX2)	+= iomux-mx1-mx2.o dma-mx1-mx2.o
 obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o
 obj-$(CONFIG_MXC_PWM)  += pwm.o
+obj-$(CONFIG_USB_EHCI_MXC) += ehci.o
 obj-$(CONFIG_MXC_ULPI) += ulpi.o
 obj-$(CONFIG_ARCH_MXC_AUDMUX_V1) += audmux-v1.o
 obj-$(CONFIG_ARCH_MXC_AUDMUX_V2) += audmux-v2.o
diff --git a/arch/arm/plat-mxc/ehci.c b/arch/arm/plat-mxc/ehci.c
new file mode 100644
index 000000000000..41599be882e8
--- /dev/null
+++ b/arch/arm/plat-mxc/ehci.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <mach/mxc_ehci.h>
+
+#define USBCTRL_OTGBASE_OFFSET	0x600
+
+#define MX31_OTG_SIC_SHIFT	29
+#define MX31_OTG_SIC_MASK	(0xf << MX31_OTG_SIC_SHIFT)
+#define MX31_OTG_PM_BIT		(1 << 24)
+
+#define MX31_H2_SIC_SHIFT	21
+#define MX31_H2_SIC_MASK	(0xf << MX31_H2_SIC_SHIFT)
+#define MX31_H2_PM_BIT		(1 << 16)
+#define MX31_H2_DT_BIT		(1 << 5)
+
+#define MX31_H1_SIC_SHIFT	13
+#define MX31_H1_SIC_MASK	(0xf << MX31_H1_SIC_SHIFT)
+#define MX31_H1_PM_BIT		(1 << 8)
+#define MX31_H1_DT_BIT		(1 << 4)
+
+int mxc_set_usbcontrol(int port, unsigned int flags)
+{
+	unsigned int v;
+
+	if (cpu_is_mx31()) {
+		v = readl(IO_ADDRESS(MX31_OTG_BASE_ADDR +
+				     USBCTRL_OTGBASE_OFFSET));
+
+		switch (port) {
+		case 0:	/* OTG port */
+			v &= ~(MX31_OTG_SIC_MASK | MX31_OTG_PM_BIT);
+			v |= (flags & MXC_EHCI_INTERFACE_MASK)
+					<< MX31_OTG_SIC_SHIFT;
+			if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+				v |= MX31_OTG_PM_BIT;
+
+			break;
+		case 1:	/* H1 port */
+			v &= ~(MX31_H1_SIC_MASK | MX31_H1_PM_BIT);
+			v |= (flags & MXC_EHCI_INTERFACE_MASK)
+					<< MX31_H1_SIC_SHIFT;
+			if (flags & MXC_EHCI_POWER_PINS_ENABLED)
+				v |= MX31_H1_PM_BIT;
+
+			if (!(flags & MXC_EHCI_TTL_ENABLED))
+				v |= MX31_H1_DT_BIT;
+
+			break;
+		case 2:	/* H2 port */
+			v &= ~(MX31_H2_SIC_MASK | MX31_H2_PM_BIT);
+			v |= (flags & MXC_EHCI_INTERFACE_MASK)
+					<< MX31_H2_SIC_SHIFT;
+			if (!(flags & MXC_EHCI_POWER_PINS_ENABLED))
+				v |= MX31_H2_PM_BIT;
+
+			if (!(flags & MXC_EHCI_TTL_ENABLED))
+				v |= MX31_H2_DT_BIT;
+
+			break;
+		}
+
+		writel(v, IO_ADDRESS(MX31_OTG_BASE_ADDR +
+				     USBCTRL_OTGBASE_OFFSET));
+		return 0;
+	}
+
+	printk(KERN_WARNING
+		"%s() unable to setup USBCONTROL for this CPU\n", __func__);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(mxc_set_usbcontrol);
+
diff --git a/arch/arm/plat-mxc/include/mach/mxc_ehci.h b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
new file mode 100644
index 000000000000..8f796239393e
--- /dev/null
+++ b/arch/arm/plat-mxc/include/mach/mxc_ehci.h
@@ -0,0 +1,37 @@
+#ifndef __INCLUDE_ASM_ARCH_MXC_EHCI_H
+#define __INCLUDE_ASM_ARCH_MXC_EHCI_H
+
+/* values for portsc field */
+#define MXC_EHCI_PHY_LOW_POWER_SUSPEND	(1 << 23)
+#define MXC_EHCI_FORCE_FS		(1 << 24)
+#define MXC_EHCI_UTMI_8BIT		(0 << 28)
+#define MXC_EHCI_UTMI_16BIT		(1 << 28)
+#define MXC_EHCI_SERIAL			(1 << 29)
+#define MXC_EHCI_MODE_UTMI		(0 << 30)
+#define MXC_EHCI_MODE_PHILIPS		(1 << 30)
+#define MXC_EHCI_MODE_ULPI		(2 << 30)
+#define MXC_EHCI_MODE_SERIAL		(3 << 30)
+
+/* values for flags field */
+#define MXC_EHCI_INTERFACE_DIFF_UNI	(0 << 0)
+#define MXC_EHCI_INTERFACE_DIFF_BI	(1 << 0)
+#define MXC_EHCI_INTERFACE_SINGLE_UNI	(2 << 0)
+#define MXC_EHCI_INTERFACE_SINGLE_BI	(3 << 0)
+#define MXC_EHCI_INTERFACE_MASK		(0xf)
+
+#define MXC_EHCI_POWER_PINS_ENABLED	(1 << 5)
+#define MXC_EHCI_TTL_ENABLED		(1 << 6)
+
+struct mxc_usbh_platform_data {
+	int (*init)(struct platform_device *pdev);
+	int (*exit)(struct platform_device *pdev);
+
+	unsigned int		 portsc;
+	unsigned int		 flags;
+	struct otg_transceiver	*otg;
+};
+
+int mxc_set_usbcontrol(int port, unsigned int flags);
+
+#endif /* __INCLUDE_ASM_ARCH_MXC_EHCI_H */
+
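Putting the new helper and header together, a board's EHCI platform init hook would configure its port roughly as follows. This is a sketch; the port number, interface type and flag choices are board assumptions, not values taken from this patch:

	#include <mach/mxc_ehci.h>

	static int mx31_board_usbh2_init(struct platform_device *pdev)
	{
		/* H2 host port, differential unidirectional transceiver,
		 * power pins routed on this (hypothetical) board */
		return mxc_set_usbcontrol(2, MXC_EHCI_INTERFACE_DIFF_UNI |
					     MXC_EHCI_POWER_PINS_ENABLED);
	}

	static struct mxc_usbh_platform_data usbh2_pdata = {
		.init	= mx31_board_usbh2_init,
		.portsc	= MXC_EHCI_MODE_SERIAL,
	};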
diff --git a/arch/arm/plat-omap/fb.c b/arch/arm/plat-omap/fb.c
index 78a4ce538dbd..d3eea4f47533 100644
--- a/arch/arm/plat-omap/fb.c
+++ b/arch/arm/plat-omap/fb.c
@@ -28,13 +28,13 @@
 #include <linux/platform_device.h>
 #include <linux/bootmem.h>
 #include <linux/io.h>
+#include <linux/omapfb.h>
 
 #include <mach/hardware.h>
 #include <asm/mach/map.h>
 
 #include <plat/board.h>
 #include <plat/sram.h>
-#include <plat/omapfb.h>
 
 #if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
 
@@ -55,6 +55,10 @@ static struct platform_device omap_fb_device = {
 	.num_resources = 0,
 };
 
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+}
+
 static inline int ranges_overlap(unsigned long start1, unsigned long size1,
 				 unsigned long start2, unsigned long size2)
 {
@@ -327,7 +331,33 @@ static inline int omap_init_fb(void)
 
 arch_initcall(omap_init_fb);
 
-#else
+#elif defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+static struct omapfb_platform_data omapfb_config;
+
+static struct platform_device omap_fb_device = {
+	.name		= "omapfb",
+	.id		= -1,
+	.dev = {
+		.dma_mask		= &omap_fb_dma_mask,
+		.coherent_dma_mask	= ~(u32)0,
+		.platform_data		= &omapfb_config,
+	},
+	.num_resources = 0,
+};
+
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+	omapfb_config = *data;
+}
+
+static inline int omap_init_fb(void)
+{
+	return platform_device_register(&omap_fb_device);
+}
+
+arch_initcall(omap_init_fb);
 
 void omapfb_reserve_sdram(void) {}
 unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
@@ -339,5 +369,20 @@ unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
 	return 0;
 }
 
+#else
+
+void omapfb_set_platform_data(struct omapfb_platform_data *data)
+{
+}
+
+void omapfb_reserve_sdram(void) {}
+unsigned long omapfb_reserve_sram(unsigned long sram_pstart,
+				  unsigned long sram_vstart,
+				  unsigned long sram_size,
+				  unsigned long start_avail,
+				  unsigned long size_avail)
+{
+	return 0;
+}
 
 #endif
diff --git a/arch/arm/plat-omap/include/plat/display.h b/arch/arm/plat-omap/include/plat/display.h
new file mode 100644
index 000000000000..c66e464732df
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/display.h
@@ -0,0 +1,575 @@
+/*
+ * linux/include/asm-arm/arch-omap/display.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARCH_OMAP_DISPLAY_H
+#define __ASM_ARCH_OMAP_DISPLAY_H
+
+#include <linux/list.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+
+#define DISPC_IRQ_FRAMEDONE		(1 << 0)
+#define DISPC_IRQ_VSYNC			(1 << 1)
+#define DISPC_IRQ_EVSYNC_EVEN		(1 << 2)
+#define DISPC_IRQ_EVSYNC_ODD		(1 << 3)
+#define DISPC_IRQ_ACBIAS_COUNT_STAT	(1 << 4)
+#define DISPC_IRQ_PROG_LINE_NUM		(1 << 5)
+#define DISPC_IRQ_GFX_FIFO_UNDERFLOW	(1 << 6)
+#define DISPC_IRQ_GFX_END_WIN		(1 << 7)
+#define DISPC_IRQ_PAL_GAMMA_MASK	(1 << 8)
+#define DISPC_IRQ_OCP_ERR		(1 << 9)
+#define DISPC_IRQ_VID1_FIFO_UNDERFLOW	(1 << 10)
+#define DISPC_IRQ_VID1_END_WIN		(1 << 11)
+#define DISPC_IRQ_VID2_FIFO_UNDERFLOW	(1 << 12)
+#define DISPC_IRQ_VID2_END_WIN		(1 << 13)
+#define DISPC_IRQ_SYNC_LOST		(1 << 14)
+#define DISPC_IRQ_SYNC_LOST_DIGIT	(1 << 15)
+#define DISPC_IRQ_WAKEUP		(1 << 16)
+
+struct omap_dss_device;
+struct omap_overlay_manager;
+
+enum omap_display_type {
+	OMAP_DISPLAY_TYPE_NONE		= 0,
+	OMAP_DISPLAY_TYPE_DPI		= 1 << 0,
+	OMAP_DISPLAY_TYPE_DBI		= 1 << 1,
+	OMAP_DISPLAY_TYPE_SDI		= 1 << 2,
+	OMAP_DISPLAY_TYPE_DSI		= 1 << 3,
+	OMAP_DISPLAY_TYPE_VENC		= 1 << 4,
+};
+
+enum omap_plane {
+	OMAP_DSS_GFX	= 0,
+	OMAP_DSS_VIDEO1	= 1,
+	OMAP_DSS_VIDEO2	= 2
+};
+
+enum omap_channel {
+	OMAP_DSS_CHANNEL_LCD	= 0,
+	OMAP_DSS_CHANNEL_DIGIT	= 1,
+};
+
+enum omap_color_mode {
+	OMAP_DSS_COLOR_CLUT1	= 1 << 0,  /* BITMAP 1 */
+	OMAP_DSS_COLOR_CLUT2	= 1 << 1,  /* BITMAP 2 */
+	OMAP_DSS_COLOR_CLUT4	= 1 << 2,  /* BITMAP 4 */
+	OMAP_DSS_COLOR_CLUT8	= 1 << 3,  /* BITMAP 8 */
+	OMAP_DSS_COLOR_RGB12U	= 1 << 4,  /* RGB12, 16-bit container */
+	OMAP_DSS_COLOR_ARGB16	= 1 << 5,  /* ARGB16 */
+	OMAP_DSS_COLOR_RGB16	= 1 << 6,  /* RGB16 */
+	OMAP_DSS_COLOR_RGB24U	= 1 << 7,  /* RGB24, 32-bit container */
+	OMAP_DSS_COLOR_RGB24P	= 1 << 8,  /* RGB24, 24-bit container */
+	OMAP_DSS_COLOR_YUV2	= 1 << 9,  /* YUV2 4:2:2 co-sited */
+	OMAP_DSS_COLOR_UYVY	= 1 << 10, /* UYVY 4:2:2 co-sited */
+	OMAP_DSS_COLOR_ARGB32	= 1 << 11, /* ARGB32 */
+	OMAP_DSS_COLOR_RGBA32	= 1 << 12, /* RGBA32 */
+	OMAP_DSS_COLOR_RGBX32	= 1 << 13, /* RGBx32 */
+
+	OMAP_DSS_COLOR_GFX_OMAP2 =
+		OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+		OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+		OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P,
+
+	OMAP_DSS_COLOR_VID_OMAP2 =
+		OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+		OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+		OMAP_DSS_COLOR_UYVY,
+
+	OMAP_DSS_COLOR_GFX_OMAP3 =
+		OMAP_DSS_COLOR_CLUT1 | OMAP_DSS_COLOR_CLUT2 |
+		OMAP_DSS_COLOR_CLUT4 | OMAP_DSS_COLOR_CLUT8 |
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+		OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+		OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_ARGB32 |
+		OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+
+	OMAP_DSS_COLOR_VID1_OMAP3 =
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_RGB16 |
+		OMAP_DSS_COLOR_RGB24U | OMAP_DSS_COLOR_RGB24P |
+		OMAP_DSS_COLOR_YUV2 | OMAP_DSS_COLOR_UYVY,
+
+	OMAP_DSS_COLOR_VID2_OMAP3 =
+		OMAP_DSS_COLOR_RGB12U | OMAP_DSS_COLOR_ARGB16 |
+		OMAP_DSS_COLOR_RGB16 | OMAP_DSS_COLOR_RGB24U |
+		OMAP_DSS_COLOR_RGB24P | OMAP_DSS_COLOR_YUV2 |
+		OMAP_DSS_COLOR_UYVY | OMAP_DSS_COLOR_ARGB32 |
+		OMAP_DSS_COLOR_RGBA32 | OMAP_DSS_COLOR_RGBX32,
+};
+
+enum omap_lcd_display_type {
+	OMAP_DSS_LCD_DISPLAY_STN,
+	OMAP_DSS_LCD_DISPLAY_TFT,
+};
+
+enum omap_dss_load_mode {
+	OMAP_DSS_LOAD_CLUT_AND_FRAME	= 0,
+	OMAP_DSS_LOAD_CLUT_ONLY		= 1,
+	OMAP_DSS_LOAD_FRAME_ONLY	= 2,
+	OMAP_DSS_LOAD_CLUT_ONCE_FRAME	= 3,
+};
+
+enum omap_dss_trans_key_type {
+	OMAP_DSS_COLOR_KEY_GFX_DST = 0,
+	OMAP_DSS_COLOR_KEY_VID_SRC = 1,
+};
+
+enum omap_rfbi_te_mode {
+	OMAP_DSS_RFBI_TE_MODE_1 = 1,
+	OMAP_DSS_RFBI_TE_MODE_2 = 2,
+};
+
+enum omap_panel_config {
+	OMAP_DSS_LCD_IVS	= 1<<0,
+	OMAP_DSS_LCD_IHS	= 1<<1,
+	OMAP_DSS_LCD_IPC	= 1<<2,
+	OMAP_DSS_LCD_IEO	= 1<<3,
+	OMAP_DSS_LCD_RF		= 1<<4,
+	OMAP_DSS_LCD_ONOFF	= 1<<5,
+
+	OMAP_DSS_LCD_TFT	= 1<<20,
+};
+
+enum omap_dss_venc_type {
+	OMAP_DSS_VENC_TYPE_COMPOSITE,
+	OMAP_DSS_VENC_TYPE_SVIDEO,
+};
+
+enum omap_display_caps {
+	OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE	= 1 << 0,
+	OMAP_DSS_DISPLAY_CAP_TEAR_ELIM		= 1 << 1,
+};
+
+enum omap_dss_update_mode {
+	OMAP_DSS_UPDATE_DISABLED = 0,
+	OMAP_DSS_UPDATE_AUTO,
+	OMAP_DSS_UPDATE_MANUAL,
+};
+
+enum omap_dss_display_state {
+	OMAP_DSS_DISPLAY_DISABLED = 0,
+	OMAP_DSS_DISPLAY_ACTIVE,
+	OMAP_DSS_DISPLAY_SUSPENDED,
+};
+
+/* XXX perhaps this should be removed */
+enum omap_dss_overlay_managers {
+	OMAP_DSS_OVL_MGR_LCD,
+	OMAP_DSS_OVL_MGR_TV,
+};
+
+enum omap_dss_rotation_type {
+	OMAP_DSS_ROT_DMA = 0,
+	OMAP_DSS_ROT_VRFB = 1,
+};
+
+/* clockwise rotation angle */
+enum omap_dss_rotation_angle {
+	OMAP_DSS_ROT_0   = 0,
+	OMAP_DSS_ROT_90  = 1,
+	OMAP_DSS_ROT_180 = 2,
+	OMAP_DSS_ROT_270 = 3,
+};
+
+enum omap_overlay_caps {
+	OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
+	OMAP_DSS_OVL_CAP_DISPC = 1 << 1,
+};
+
+enum omap_overlay_manager_caps {
+	OMAP_DSS_OVL_MGR_CAP_DISPC = 1 << 0,
+};
+
+/* RFBI */
+
+struct rfbi_timings {
+	int cs_on_time;
+	int cs_off_time;
+	int we_on_time;
+	int we_off_time;
+	int re_on_time;
+	int re_off_time;
+	int we_cycle_time;
+	int re_cycle_time;
+	int cs_pulse_width;
+	int access_time;
+
+	int clk_div;
+
+	u32 tim[5];		/* set by rfbi_convert_timings() */
+
+	int converted;
+};
+
+void omap_rfbi_write_command(const void *buf, u32 len);
+void omap_rfbi_read_data(void *buf, u32 len);
+void omap_rfbi_write_data(const void *buf, u32 len);
+void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
+		u16 x, u16 y,
+		u16 w, u16 h);
+int omap_rfbi_enable_te(bool enable, unsigned line);
+int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
+		unsigned hs_pulse_time, unsigned vs_pulse_time,
+		int hs_pol_inv, int vs_pol_inv, int extif_div);
+
+/* DSI */
+void dsi_bus_lock(void);
+void dsi_bus_unlock(void);
+int dsi_vc_dcs_write(int channel, u8 *data, int len);
+int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len);
+int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen);
+int dsi_vc_set_max_rx_packet_size(int channel, u16 len);
+int dsi_vc_send_null(int channel);
+int dsi_vc_send_bta_sync(int channel);
+
+/* Board specific data */
+struct omap_dss_board_info {
+	int (*get_last_off_on_transaction_id)(struct device *dev);
+	int num_devices;
+	struct omap_dss_device **devices;
+	struct omap_dss_device *default_device;
+};
+
+struct omap_video_timings {
+	/* Unit: pixels */
+	u16 x_res;
+	/* Unit: pixels */
+	u16 y_res;
+	/* Unit: KHz */
+	u32 pixel_clock;
+	/* Unit: pixel clocks */
+	u16 hsw;	/* Horizontal synchronization pulse width */
+	/* Unit: pixel clocks */
+	u16 hfp;	/* Horizontal front porch */
+	/* Unit: pixel clocks */
+	u16 hbp;	/* Horizontal back porch */
+	/* Unit: line clocks */
+	u16 vsw;	/* Vertical synchronization pulse width */
+	/* Unit: line clocks */
+	u16 vfp;	/* Vertical front porch */
+	/* Unit: line clocks */
+	u16 vbp;	/* Vertical back porch */
+};
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+/* Hardcoded timings for tv modes. Venc only uses these to
+ * identify the mode, and does not actually use the configs
+ * itself. However, the configs should be something that
+ * a normal monitor can also show */
+const extern struct omap_video_timings omap_dss_pal_timings;
+const extern struct omap_video_timings omap_dss_ntsc_timings;
+#endif
+
+struct omap_overlay_info {
+	bool enabled;
+
+	u32 paddr;
+	void __iomem *vaddr;
+	u16 screen_width;
+	u16 width;
+	u16 height;
+	enum omap_color_mode color_mode;
+	u8 rotation;
+	enum omap_dss_rotation_type rotation_type;
+	bool mirror;
+
+	u16 pos_x;
+	u16 pos_y;
+	u16 out_width;	/* if 0, out_width == width */
+	u16 out_height;	/* if 0, out_height == height */
+	u8 global_alpha;
+};
+
+struct omap_overlay {
+	struct kobject kobj;
+	struct list_head list;
+
+	/* static fields */
+	const char *name;
+	int id;
+	enum omap_color_mode supported_modes;
+	enum omap_overlay_caps caps;
+
+	/* dynamic fields */
+	struct omap_overlay_manager *manager;
+	struct omap_overlay_info info;
+
+	/* if true, info has been changed, but not applied() yet */
+	bool info_dirty;
+
+	int (*set_manager)(struct omap_overlay *ovl,
+		struct omap_overlay_manager *mgr);
+	int (*unset_manager)(struct omap_overlay *ovl);
+
+	int (*set_overlay_info)(struct omap_overlay *ovl,
+		struct omap_overlay_info *info);
+	void (*get_overlay_info)(struct omap_overlay *ovl,
+		struct omap_overlay_info *info);
+
+	int (*wait_for_go)(struct omap_overlay *ovl);
+};
+
+struct omap_overlay_manager_info {
+	u32 default_color;
+
+	enum omap_dss_trans_key_type trans_key_type;
+	u32 trans_key;
+	bool trans_enabled;
+
+	bool alpha_enabled;
+};
+
+struct omap_overlay_manager {
+	struct kobject kobj;
+	struct list_head list;
+
+	/* static fields */
+	const char *name;
+	int id;
+	enum omap_overlay_manager_caps caps;
+	int num_overlays;
+	struct omap_overlay **overlays;
+	enum omap_display_type supported_displays;
+
+	/* dynamic fields */
+	struct omap_dss_device *device;
+	struct omap_overlay_manager_info info;
+
+	bool device_changed;
+	/* if true, info has been changed but not applied() yet */
+	bool info_dirty;
+
+	int (*set_device)(struct omap_overlay_manager *mgr,
+		struct omap_dss_device *dssdev);
+	int (*unset_device)(struct omap_overlay_manager *mgr);
+
+	int (*set_manager_info)(struct omap_overlay_manager *mgr,
+		struct omap_overlay_manager_info *info);
+	void (*get_manager_info)(struct omap_overlay_manager *mgr,
+		struct omap_overlay_manager_info *info);
+
+	int (*apply)(struct omap_overlay_manager *mgr);
+	int (*wait_for_go)(struct omap_overlay_manager *mgr);
+};
+
+struct omap_dss_device {
+	struct device dev;
+
+	enum omap_display_type type;
+
+	union {
+		struct {
+			u8 data_lines;
+		} dpi;
+
+		struct {
+			u8 channel;
+			u8 data_lines;
+		} rfbi;
+
+		struct {
+			u8 datapairs;
+		} sdi;
+
+		struct {
+			u8 clk_lane;
+			u8 clk_pol;
+			u8 data1_lane;
+			u8 data1_pol;
+			u8 data2_lane;
+			u8 data2_pol;
+
+			struct {
+				u16 regn;
+				u16 regm;
+				u16 regm3;
+				u16 regm4;
+
+				u16 lp_clk_div;
+
+				u16 lck_div;
+				u16 pck_div;
+			} div;
+
+			bool ext_te;
+			u8 ext_te_gpio;
+		} dsi;
+
+		struct {
+			enum omap_dss_venc_type type;
+			bool invert_polarity;
+		} venc;
+	} phy;
+
+	struct {
+		struct omap_video_timings timings;
+
+		int acbi;	/* ac-bias pin transitions per interrupt */
+		/* Unit: line clocks */
+		int acb;	/* ac-bias pin frequency */
+
+		enum omap_panel_config config;
+
+		u8 recommended_bpp;
+
+		struct omap_dss_device *ctrl;
+	} panel;
+
+	struct {
+		u8 pixel_size;
+		struct rfbi_timings rfbi_timings;
+		struct omap_dss_device *panel;
+	} ctrl;
+
+	int reset_gpio;
+
+	int max_backlight_level;
+
+	const char *name;
+
+	/* used to match device to driver */
+	const char *driver_name;
+
+	void *data;
+
+	struct omap_dss_driver *driver;
+
+	/* helper variable for driver suspend/resume */
+	bool activate_after_resume;
+
+	enum omap_display_caps caps;
+
+	struct omap_overlay_manager *manager;
+
+	enum omap_dss_display_state state;
+
+	int (*enable)(struct omap_dss_device *dssdev);
+	void (*disable)(struct omap_dss_device *dssdev);
+
+	int (*suspend)(struct omap_dss_device *dssdev);
+	int (*resume)(struct omap_dss_device *dssdev);
+
+	void (*get_resolution)(struct omap_dss_device *dssdev,
+			u16 *xres, u16 *yres);
+	int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
+
+	int (*check_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*set_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	void (*get_timings)(struct omap_dss_device *dssdev,
+			struct omap_video_timings *timings);
+	int (*update)(struct omap_dss_device *dssdev,
+			u16 x, u16 y, u16 w, u16 h);
+	int (*sync)(struct omap_dss_device *dssdev);
+	int (*wait_vsync)(struct omap_dss_device *dssdev);
+
+	int (*set_update_mode)(struct omap_dss_device *dssdev,
+			enum omap_dss_update_mode);
+	enum omap_dss_update_mode (*get_update_mode)
+		(struct omap_dss_device *dssdev);
+
+	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+	int (*get_te)(struct omap_dss_device *dssdev);
+
+	u8 (*get_rotate)(struct omap_dss_device *dssdev);
+	int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+	bool (*get_mirror)(struct omap_dss_device *dssdev);
+	int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+	int (*run_test)(struct omap_dss_device *dssdev, int test);
+	int (*memory_read)(struct omap_dss_device *dssdev,
+			void *buf, size_t size,
+			u16 x, u16 y, u16 w, u16 h);
+
+	int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
+	u32 (*get_wss)(struct omap_dss_device *dssdev);
+
+	/* platform specific */
+	int (*platform_enable)(struct omap_dss_device *dssdev);
+	void (*platform_disable)(struct omap_dss_device *dssdev);
+	int (*set_backlight)(struct omap_dss_device *dssdev, int level);
+	int (*get_backlight)(struct omap_dss_device *dssdev);
+};
+
+struct omap_dss_driver {
+	struct device_driver driver;
+
+	int (*probe)(struct omap_dss_device *);
+	void (*remove)(struct omap_dss_device *);
+
+	int (*enable)(struct omap_dss_device *display);
+	void (*disable)(struct omap_dss_device *display);
+	int (*suspend)(struct omap_dss_device *display);
+	int (*resume)(struct omap_dss_device *display);
+	int (*run_test)(struct omap_dss_device *display, int test);
+
+	void (*setup_update)(struct omap_dss_device *dssdev,
+			u16 x, u16 y, u16 w, u16 h);
+
+	int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
+	int (*wait_for_te)(struct omap_dss_device *dssdev);
+
+	u8 (*get_rotate)(struct omap_dss_device *dssdev);
+	int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
+
+	bool (*get_mirror)(struct omap_dss_device *dssdev);
+	int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
+
+	int (*memory_read)(struct omap_dss_device *dssdev,
+			void *buf, size_t size,
+			u16 x, u16 y, u16 w, u16 h);
+};
+
+int omap_dss_register_driver(struct omap_dss_driver *);
+void omap_dss_unregister_driver(struct omap_dss_driver *);
+
+int omap_dss_register_device(struct omap_dss_device *);
+void omap_dss_unregister_device(struct omap_dss_device *);
+
+void omap_dss_get_device(struct omap_dss_device *dssdev);
+void omap_dss_put_device(struct omap_dss_device *dssdev);
+#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
+struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+struct omap_dss_device *omap_dss_find_device(void *data,
+		int (*match)(struct omap_dss_device *dssdev, void *data));
+
+int omap_dss_start_device(struct omap_dss_device *dssdev);
+void omap_dss_stop_device(struct omap_dss_device *dssdev);
+
+int omap_dss_get_num_overlay_managers(void);
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
+
+int omap_dss_get_num_overlays(void);
+struct omap_overlay *omap_dss_get_overlay(int num);
+
+typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
+int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
+
+int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout);
+int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
+		unsigned long timeout);
+
+#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
+#define to_dss_device(x) container_of((x), struct omap_dss_device, dev)
+
+#endif
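A short consumer sketch of the device-lookup helpers declared above. The display name "lcd" is an assumption that happens to match the 3430SDP board file earlier in this patch; error handling is abbreviated:

	static int match_name(struct omap_dss_device *dssdev, void *data)
	{
		return strcmp(dssdev->name, (const char *)data) == 0;
	}

	static int enable_named_display(const char *name)
	{
		struct omap_dss_device *dssdev;
		int r;

		dssdev = omap_dss_find_device((void *)name, match_name);
		if (!dssdev)
			return -ENODEV;

		r = dssdev->enable(dssdev);
		omap_dss_put_device(dssdev);	/* assumed: find_device takes a reference */
		return r;
	}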
diff --git a/arch/arm/plat-omap/include/plat/sdrc.h b/arch/arm/plat-omap/include/plat/sdrc.h
index f704030d2a70..7b76f50564ba 100644
--- a/arch/arm/plat-omap/include/plat/sdrc.h
+++ b/arch/arm/plat-omap/include/plat/sdrc.h
@@ -94,7 +94,10 @@
 
 /* SMS register offsets - read/write with sms_{read,write}_reg() */
 
 #define SMS_SYSCONFIG			0x010
+#define SMS_ROT_CONTROL(context)	(0x180 + 0x10 * context)
+#define SMS_ROT_SIZE(context)		(0x184 + 0x10 * context)
+#define SMS_ROT_PHYSICAL_BA(context)	(0x188 + 0x10 * context)
 /* REVISIT: fill in other SMS registers here */
 
 
@@ -129,6 +132,10 @@ int omap2_sdrc_get_params(unsigned long r,
 void omap2_sms_save_context(void);
 void omap2_sms_restore_context(void);
 
+void omap2_sms_write_rot_control(u32 val, unsigned ctx);
+void omap2_sms_write_rot_size(u32 val, unsigned ctx);
+void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx);
+
 #ifdef CONFIG_ARCH_OMAP2
 
 struct memory_timings {
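Each SMS rotation context occupies a 0x10-byte register stride, so the three macros expand to consecutive addresses per context; for example:

	SMS_ROT_CONTROL(0)     = 0x180		SMS_ROT_CONTROL(2)     = 0x1a0
	SMS_ROT_SIZE(0)        = 0x184		SMS_ROT_SIZE(2)        = 0x1a4
	SMS_ROT_PHYSICAL_BA(0) = 0x188		SMS_ROT_PHYSICAL_BA(2) = 0x1a8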
diff --git a/arch/arm/plat-omap/include/plat/vram.h b/arch/arm/plat-omap/include/plat/vram.h
new file mode 100644
index 000000000000..edd4987758a6
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/vram.h
@@ -0,0 +1,62 @@
+/*
+ * VRAM manager for OMAP
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __OMAP_VRAM_H__
+#define __OMAP_VRAM_H__
+
+#include <linux/types.h>
+
+#define OMAP_VRAM_MEMTYPE_SDRAM		0
+#define OMAP_VRAM_MEMTYPE_SRAM		1
+#define OMAP_VRAM_MEMTYPE_MAX		1
+
+extern int omap_vram_add_region(unsigned long paddr, size_t size);
+extern int omap_vram_free(unsigned long paddr, size_t size);
+extern int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr);
+extern int omap_vram_reserve(unsigned long paddr, size_t size);
+extern void omap_vram_get_info(unsigned long *vram, unsigned long *free_vram,
+		unsigned long *largest_free_block);
+
+#ifdef CONFIG_OMAP2_VRAM
+extern void omap_vram_set_sdram_vram(u32 size, u32 start);
+extern void omap_vram_set_sram_vram(u32 size, u32 start);
+
+extern void omap_vram_reserve_sdram(void);
+extern unsigned long omap_vram_reserve_sram(unsigned long sram_pstart,
+					    unsigned long sram_vstart,
+					    unsigned long sram_size,
+					    unsigned long pstart_avail,
+					    unsigned long size_avail);
+#else
+static inline void omap_vram_set_sdram_vram(u32 size, u32 start) { }
+static inline void omap_vram_set_sram_vram(u32 size, u32 start) { }
+
+static inline void omap_vram_reserve_sdram(void) { }
+static inline unsigned long omap_vram_reserve_sram(unsigned long sram_pstart,
+					    unsigned long sram_vstart,
+					    unsigned long sram_size,
+					    unsigned long pstart_avail,
+					    unsigned long size_avail)
+{
+	return 0;
+}
+#endif
+
+#endif
diff --git a/arch/arm/plat-omap/include/plat/vrfb.h b/arch/arm/plat-omap/include/plat/vrfb.h
new file mode 100644
index 000000000000..d8a03ced3b10
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/vrfb.h
@@ -0,0 +1,50 @@
+/*
+ * VRFB Rotation Engine
+ *
+ * Copyright (C) 2009 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __OMAP_VRFB_H__
+#define __OMAP_VRFB_H__
+
+#define OMAP_VRFB_LINE_LEN 2048
+
+struct vrfb {
+	u8 context;
+	void __iomem *vaddr[4];
+	unsigned long paddr[4];
+	u16 xres;
+	u16 yres;
+	u16 xoffset;
+	u16 yoffset;
+	u8 bytespp;
+	bool yuv_mode;
+};
+
+extern int omap_vrfb_request_ctx(struct vrfb *vrfb);
+extern void omap_vrfb_release_ctx(struct vrfb *vrfb);
+extern void omap_vrfb_adjust_size(u16 *width, u16 *height,
+		u8 bytespp);
+extern u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp);
+extern u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp);
+extern void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
+		u16 width, u16 height,
+		unsigned bytespp, bool yuv_mode);
+extern int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot);
+extern void omap_vrfb_restore_context(void);
+
+#endif /* __VRFB_H */
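The intended call sequence for a rotated framebuffer, sketched from the declarations above (paddr stands for an already-allocated physical buffer, e.g. from omap_vram_alloc(); geometry is illustrative and error handling is trimmed):

	struct vrfb vrfb;
	u16 w = 800, h = 480;

	if (omap_vrfb_request_ctx(&vrfb))
		return -EBUSY;

	/* round the dimensions up to what the rotation engine can address */
	omap_vrfb_adjust_size(&w, &h, 2);

	/* bind the buffer: 800x480 at 2 bytes per pixel, RGB (not YUV) */
	omap_vrfb_setup(&vrfb, paddr, 800, 480, 2, false);

	/* map the 90-degree view, then scan out via vrfb.paddr[1] */
	omap_vrfb_map_angle(&vrfb, h, 1);

	/* ... use the rotated view ... */

	omap_vrfb_release_ctx(&vrfb);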
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 3e923668778d..ad2bf07d30b5 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -28,6 +28,7 @@
 #include <plat/sram.h>
 #include <plat/board.h>
 #include <plat/cpu.h>
+#include <plat/vram.h>
 
 #include <plat/control.h>
 
@@ -185,6 +186,13 @@ void __init omap_detect_sram(void)
 					omap_sram_start + SRAM_BOOTLOADER_SZ,
 					omap_sram_size - SRAM_BOOTLOADER_SZ);
 	omap_sram_size -= reserved;
+
+	reserved = omap_vram_reserve_sram(omap_sram_start, omap_sram_base,
+			omap_sram_size,
+			omap_sram_start + SRAM_BOOTLOADER_SZ,
+			omap_sram_size - SRAM_BOOTLOADER_SZ);
+	omap_sram_size -= reserved;
+
 	omap_sram_ceil = omap_sram_base + omap_sram_size;
 }
 
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
index 483d666c27c0..66a197266637 100644
--- a/arch/avr32/include/asm/syscalls.h
+++ b/arch/avr32/include/asm/syscalls.h
@@ -29,10 +29,6 @@ asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *,
 				struct pt_regs *);
 asmlinkage int sys_rt_sigreturn(struct pt_regs *);
 
-/* kernel/sys_avr32.c */
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, off_t);
-
 /* mm/cache.c */
 asmlinkage int sys_cacheflush(int, void __user *, size_t);
 
diff --git a/arch/avr32/kernel/sys_avr32.c b/arch/avr32/kernel/sys_avr32.c
index 5d2daeaf356f..459349b5ed5a 100644
--- a/arch/avr32/kernel/sys_avr32.c
+++ b/arch/avr32/kernel/sys_avr32.c
@@ -5,39 +5,8 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/mm.h>
 #include <linux/unistd.h>
 
-#include <asm/mman.h>
-#include <asm/uaccess.h>
-#include <asm/syscalls.h>
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, off_t offset)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return error;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, offset);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 int kernel_execve(const char *file, char **argv, char **envp)
 {
 	register long scno asm("r8") = __NR_execve;
diff --git a/arch/avr32/kernel/syscall-stubs.S b/arch/avr32/kernel/syscall-stubs.S
index f7244cd02fbb..0447a3e2ba64 100644
--- a/arch/avr32/kernel/syscall-stubs.S
+++ b/arch/avr32/kernel/syscall-stubs.S
@@ -61,7 +61,7 @@ __sys_execve:
 __sys_mmap2:
 	pushm	lr
 	st.w	--sp, ARG6
-	call	sys_mmap2
+	call	sys_mmap_pgoff
 	sub	sp, -4
 	popm	pc
 
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index afcef129d4e8..2e7f8e10bf87 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -22,39 +22,6 @@
 #include <asm/cacheflush.h>
 #include <asm/dma.h>
 
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len,
-	 unsigned long prot, unsigned long flags,
-	 unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
- out:
-	return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, unsigned long pgoff)
-{
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
 asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
 {
 	return sram_alloc_with_lsl(size, flags);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index a50637a8b9bd..f3f8bb46b517 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1422,7 +1422,7 @@ ENTRY(_sys_call_table)
 	.long _sys_ni_syscall	/* streams2 */
 	.long _sys_vfork		/* 190 */
 	.long _sys_getrlimit
-	.long _sys_mmap2
+	.long _sys_mmap_pgoff
 	.long _sys_truncate64
 	.long _sys_ftruncate64
 	.long _sys_stat64	/* 195 */
diff --git a/arch/cris/kernel/sys_cris.c b/arch/cris/kernel/sys_cris.c
index 2ad962c7e88e..c2bbb1ac98a9 100644
--- a/arch/cris/kernel/sys_cris.c
+++ b/arch/cris/kernel/sys_cris.c
@@ -26,31 +26,6 @@
 #include <asm/uaccess.h>
 #include <asm/segment.h>
 
-/* common code for old and new mmaps */
-static inline long
-do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-	unsigned long flags, unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage unsigned long old_mmap(unsigned long __user *args)
 {
 	unsigned long buffer[6];
@@ -63,7 +38,7 @@ asmlinkage unsigned long old_mmap(unsigned long __user *args)
 	if (buffer[5] & ~PAGE_MASK) /* verify that offset is on page boundary */
 		goto out;
 
-	err = do_mmap2(buffer[0], buffer[1], buffer[2], buffer[3],
+	err = sys_mmap_pgoff(buffer[0], buffer[1], buffer[2], buffer[3],
 		       buffer[4], buffer[5] >> PAGE_SHIFT);
 out:
 	return err;
@@ -73,7 +48,8 @@ asmlinkage long
 sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
 	  unsigned long flags, unsigned long fd, unsigned long pgoff)
 {
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
+	/* bug(?): 8Kb pages here */
+	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
 }
 
 /*
diff --git a/arch/frv/kernel/sys_frv.c b/arch/frv/kernel/sys_frv.c
index 2b6b5289cdcc..1d3d4c9e2521 100644
--- a/arch/frv/kernel/sys_frv.c
+++ b/arch/frv/kernel/sys_frv.c
@@ -31,9 +31,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
 			  unsigned long prot, unsigned long flags,
 			  unsigned long fd, unsigned long pgoff)
 {
-	int error = -EBADF;
-	struct file * file = NULL;
-
 	/* As with sparc32, make sure the shift for mmap2 is constant
 	   (12), no matter what PAGE_SIZE we have.... */
 
@@ -41,69 +38,10 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
 	   trying to map something we can't */
 	if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
 		return -EINVAL;
-	pgoff >>= PAGE_SHIFT - 12;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
-#if 0 /* DAVIDM - do we want this */
-struct mmap_arg_struct64 {
-	__u32 addr;
-	__u32 len;
-	__u32 prot;
-	__u32 flags;
-	__u64 offset;	/* 64 bits */
-	__u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
-	int error = -EFAULT;
-	struct file * file = NULL;
-	struct mmap_arg_struct64 a;
-	unsigned long pgoff;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-
-	if ((long)a.offset & ~PAGE_MASK)
-		return -EINVAL;
-
-	pgoff = a.offset >> PAGE_SHIFT;
-	if ((a.offset >> PAGE_SHIFT) != pgoff)
-		return -EINVAL;
-
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(a.fd);
-		if (!file)
-			goto out;
-	}
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
 
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
-out:
-	return error;
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      pgoff >> (PAGE_SHIFT - 12));
 }
-#endif
 
 /*
  * sys_ipc() is the de-multiplexer for the SysV IPC calls..
diff --git a/arch/h8300/kernel/sys_h8300.c b/arch/h8300/kernel/sys_h8300.c
index 8cb5d73a0e35..b5969db0ca10 100644
--- a/arch/h8300/kernel/sys_h8300.c
+++ b/arch/h8300/kernel/sys_h8300.c
@@ -26,39 +26,6 @@
 #include <asm/traps.h>
 #include <asm/unistd.h>
 
-/* common code for old and new mmaps */
-static inline long do_mmap2(
-	unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-	unsigned long prot, unsigned long flags,
-	unsigned long fd, unsigned long pgoff)
-{
-	return do_mmap2(addr, len, prot, flags, fd, pgoff);
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
@@ -87,57 +54,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
-out:
-	return error;
-}
-
-#if 0 /* DAVIDM - do we want this */
-struct mmap_arg_struct64 {
-	__u32 addr;
-	__u32 len;
-	__u32 prot;
-	__u32 flags;
-	__u64 offset;	/* 64 bits */
-	__u32 fd;
-};
-
-asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
-{
-	int error = -EFAULT;
-	struct file * file = NULL;
-	struct mmap_arg_struct64 a;
-	unsigned long pgoff;
-
-	if (copy_from_user(&a, arg, sizeof(a)))
-		return -EFAULT;
-
-	if ((long)a.offset & ~PAGE_MASK)
-		return -EINVAL;
-
-	pgoff = a.offset >> PAGE_SHIFT;
-	if ((a.offset >> PAGE_SHIFT) != pgoff)
-		return -EINVAL;
-
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		error = -EBADF;
-		file = fget(a.fd);
-		if (!file)
-			goto out;
-	}
-	a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-	if (file)
-		fput(file);
+	error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
+			       a.offset >> PAGE_SHIFT);
 out:
 	return error;
 }
-#endif
 
 struct sel_arg_struct {
 	unsigned long n;
diff --git a/arch/h8300/kernel/syscalls.S b/arch/h8300/kernel/syscalls.S
index 4eb67faac633..2d69881eda6a 100644
--- a/arch/h8300/kernel/syscalls.S
+++ b/arch/h8300/kernel/syscalls.S
@@ -206,7 +206,7 @@ SYMBOL_NAME_LABEL(sys_call_table)
206 .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */ 206 .long SYMBOL_NAME(sys_ni_syscall) /* streams2 */
207 .long SYMBOL_NAME(sys_vfork) /* 190 */ 207 .long SYMBOL_NAME(sys_vfork) /* 190 */
208 .long SYMBOL_NAME(sys_getrlimit) 208 .long SYMBOL_NAME(sys_getrlimit)
209 .long SYMBOL_NAME(sys_mmap2) 209 .long SYMBOL_NAME(sys_mmap_pgoff)
210 .long SYMBOL_NAME(sys_truncate64) 210 .long SYMBOL_NAME(sys_truncate64)
211 .long SYMBOL_NAME(sys_ftruncate64) 211 .long SYMBOL_NAME(sys_ftruncate64)
212 .long SYMBOL_NAME(sys_stat64) /* 195 */ 212 .long SYMBOL_NAME(sys_stat64) /* 195 */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 429ec968c9ee..045b746b9808 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
858 858
859 prot = get_prot32(prot); 859 prot = get_prot32(prot);
860 860
861 if (flags & MAP_HUGETLB)
862 return -ENOMEM;
863
861#if PAGE_SHIFT > IA32_PAGE_SHIFT 864#if PAGE_SHIFT > IA32_PAGE_SHIFT
862 mutex_lock(&ia32_mmap_mutex); 865 mutex_lock(&ia32_mmap_mutex);
863 { 866 {
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e4..67455c2ed2b1 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
37#include <xen/interface/xen.h> 37#include <xen/interface/xen.h>
38#include <xen/interface/version.h> /* to compile feature.c */ 38#include <xen/interface/version.h> /* to compile feature.c */
39#include <xen/features.h> /* to compile xen-netfront.c */ 39#include <xen/features.h> /* to compile xen-netfront.c */
40#include <xen/xen.h>
40#include <asm/xen/hypercall.h> 41#include <asm/xen/hypercall.h>
41 42
42/* xen_domain_type is set before executing any C code by early_xen_setup */
43enum xen_domain_type {
44 XEN_NATIVE, /* running on bare hardware */
45 XEN_PV_DOMAIN, /* running in a PV domain */
46 XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/
47};
48
49#ifdef CONFIG_XEN
50extern enum xen_domain_type xen_domain_type;
51#else
52#define xen_domain_type XEN_NATIVE
53#endif
54
55#define xen_domain() (xen_domain_type != XEN_NATIVE)
56#define xen_pv_domain() (xen_domain() && \
57 xen_domain_type == XEN_PV_DOMAIN)
58#define xen_hvm_domain() (xen_domain() && \
59 xen_domain_type == XEN_HVM_DOMAIN)
60
61#ifdef CONFIG_XEN_DOM0
62#define xen_initial_domain() (xen_pv_domain() && \
63 (xen_start_info->flags & SIF_INITDOMAIN))
64#else
65#define xen_initial_domain() (0)
66#endif
67
68
69#ifdef CONFIG_XEN 43#ifdef CONFIG_XEN
70extern struct shared_info *HYPERVISOR_shared_info; 44extern struct shared_info *HYPERVISOR_shared_info;
71extern struct start_info *xen_start_info; 45extern struct start_info *xen_start_info;
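
The domain-type enum and the xen_domain()/xen_pv_domain()/xen_hvm_domain() predicates deleted here are not lost; they now come from the shared <xen/xen.h> included above, so ia64 and x86 use a single definition. Callers are unchanged, as in this illustrative fragment (the two callees are hypothetical):

        #include <xen/xen.h>

        if (xen_pv_domain())
                setup_pv_features();    /* hypothetical */
        else if (xen_hvm_domain())
                setup_hvm_features();   /* hypothetical */
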
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f34036..609d50056a6c 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@ sys_getpagesize (void)
100asmlinkage unsigned long 100asmlinkage unsigned long
101ia64_brk (unsigned long brk) 101ia64_brk (unsigned long brk)
102{ 102{
103 unsigned long rlim, retval, newbrk, oldbrk; 103 unsigned long retval = sys_brk(brk);
104 struct mm_struct *mm = current->mm;
105
106 /*
107 * Most of this replicates the code in sys_brk() except for an additional safety
108 * check and the clearing of r8. However, we can't call sys_brk() because we need
109 * to acquire the mmap_sem before we can do the test...
110 */
111 down_write(&mm->mmap_sem);
112
113 if (brk < mm->end_code)
114 goto out;
115 newbrk = PAGE_ALIGN(brk);
116 oldbrk = PAGE_ALIGN(mm->brk);
117 if (oldbrk == newbrk)
118 goto set_brk;
119
120 /* Always allow shrinking brk. */
121 if (brk <= mm->brk) {
122 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
123 goto set_brk;
124 goto out;
125 }
126
127 /* Check against unimplemented/unmapped addresses: */
128 if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
129 goto out;
130
131 /* Check against rlimit.. */
132 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
133 if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
134 goto out;
135
136 /* Check against existing mmap mappings. */
137 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
138 goto out;
139
140 /* Ok, looks good - let it rip. */
141 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
142 goto out;
143set_brk:
144 mm->brk = brk;
145out:
146 retval = mm->brk;
147 up_write(&mm->mmap_sem);
148 force_successful_syscall_return(); 104 force_successful_syscall_return();
149 return retval; 105 return retval;
150} 106}
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
185 return 0; 141 return 0;
186} 142}
187 143
188static inline unsigned long
189do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
190{
191 struct file *file = NULL;
192
193 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
194 if (!(flags & MAP_ANONYMOUS)) {
195 file = fget(fd);
196 if (!file)
197 return -EBADF;
198
199 if (!file->f_op || !file->f_op->mmap) {
200 addr = -ENODEV;
201 goto out;
202 }
203 }
204
205 /* Careful about overflows.. */
206 len = PAGE_ALIGN(len);
207 if (!len || len > TASK_SIZE) {
208 addr = -EINVAL;
209 goto out;
210 }
211
212 down_write(&current->mm->mmap_sem);
213 addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
214 up_write(&current->mm->mmap_sem);
215
216out: if (file)
217 fput(file);
218 return addr;
219}
220
221/* 144/*
222 * mmap2() is like mmap() except that the offset is expressed in units 145 * mmap2() is like mmap() except that the offset is expressed in units
223 * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces 146 * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
@@ -226,7 +149,7 @@ out: if (file)
226asmlinkage unsigned long 149asmlinkage unsigned long
227sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) 150sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
228{ 151{
229 addr = do_mmap2(addr, len, prot, flags, fd, pgoff); 152 addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
230 if (!IS_ERR((void *) addr)) 153 if (!IS_ERR((void *) addr))
231 force_successful_syscall_return(); 154 force_successful_syscall_return();
232 return addr; 155 return addr;
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo
238 if (offset_in_page(off) != 0) 161 if (offset_in_page(off) != 0)
239 return -EINVAL; 162 return -EINVAL;
240 163
241 addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); 164 addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
242 if (!IS_ERR((void *) addr)) 165 if (!IS_ERR((void *) addr))
243 force_successful_syscall_return(); 166 force_successful_syscall_return();
244 return addr; 167 return addr;
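
Per the comment above, sys_mmap2 takes its offset pre-divided by the page size, while sys_mmap takes bytes and insists on page alignment; with a 32-bit pgoff and 4KB pages that extends the reachable file offset from 2^32 to 2^44 bytes. An equivalence sketch (illustrative fragment):

        /* for a page-aligned byte offset off, these request the same map */
        long off = 5 * PAGE_SIZE;
        sys_mmap (addr, len, prot, flags, fd, off);               /* bytes */
        sys_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); /* pages */
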
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index c0fca2c1c858..df639db779f9 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -131,6 +131,7 @@ alloc_pci_controller (int seg)
131} 131}
132 132
133struct pci_root_info { 133struct pci_root_info {
134 struct acpi_device *bridge;
134 struct pci_controller *controller; 135 struct pci_controller *controller;
135 char *name; 136 char *name;
136}; 137};
@@ -297,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
297 window->offset = offset; 298 window->offset = offset;
298 299
299 if (insert_resource(root, &window->resource)) { 300 if (insert_resource(root, &window->resource)) {
300 printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n", 301 dev_err(&info->bridge->dev,
301 window->resource.start, window->resource.end, 302 "can't allocate host bridge window %pR\n",
302 root->name, info->name); 303 &window->resource);
304 } else {
305 if (offset)
306 dev_info(&info->bridge->dev, "host bridge window %pR "
307 "(PCI address [%#llx-%#llx])\n",
308 &window->resource,
309 window->resource.start - offset,
310 window->resource.end - offset);
311 else
312 dev_info(&info->bridge->dev,
313 "host bridge window %pR\n",
314 &window->resource);
303 } 315 }
304 316
305 return AE_OK; 317 return AE_OK;
@@ -319,8 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
319 (res->end - res->start < 16)) 331 (res->end - res->start < 16))
320 continue; 332 continue;
321 if (j >= PCI_BUS_NUM_RESOURCES) { 333 if (j >= PCI_BUS_NUM_RESOURCES) {
322 printk("Ignoring range [%#llx-%#llx] (%lx)\n", 334 dev_warn(&bus->dev,
323 res->start, res->end, res->flags); 335 "ignoring host bridge window %pR (no space)\n",
336 res);
324 continue; 337 continue;
325 } 338 }
326 bus->resource[j++] = res; 339 bus->resource[j++] = res;
@@ -364,6 +377,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
364 goto out3; 377 goto out3;
365 378
366 sprintf(name, "PCI Bus %04x:%02x", domain, bus); 379 sprintf(name, "PCI Bus %04x:%02x", domain, bus);
380 info.bridge = device;
367 info.controller = controller; 381 info.controller = controller;
368 info.name = name; 382 info.name = name;
369 acpi_walk_resources(device->handle, METHOD_NAME__CRS, 383 acpi_walk_resources(device->handle, METHOD_NAME__CRS,
@@ -720,9 +734,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
720 return ret; 734 return ret;
721} 735}
722 736
723/* It's defined in drivers/pci/pci.c */
724extern u8 pci_cache_line_size;
725
726/** 737/**
727 * set_pci_cacheline_size - determine cacheline size for PCI devices 738 * set_pci_cacheline_size - determine cacheline size for PCI devices
728 * 739 *
@@ -731,7 +742,7 @@ extern u8 pci_cache_line_size;
731 * 742 *
732 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info(). 743 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
733 */ 744 */
734static void __init set_pci_cacheline_size(void) 745static void __init set_pci_dfl_cacheline_size(void)
735{ 746{
736 unsigned long levels, unique_caches; 747 unsigned long levels, unique_caches;
737 long status; 748 long status;
@@ -751,7 +762,7 @@ static void __init set_pci_cacheline_size(void)
751 "(status=%ld)\n", __func__, status); 762 "(status=%ld)\n", __func__, status);
752 return; 763 return;
753 } 764 }
754 pci_cache_line_size = (1 << cci.pcci_line_size) / 4; 765 pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
755} 766}
756 767
757u64 ia64_dma_get_required_mask(struct device *dev) 768u64 ia64_dma_get_required_mask(struct device *dev)
@@ -782,7 +793,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
782 793
783static int __init pcibios_init(void) 794static int __init pcibios_init(void)
784{ 795{
785 set_pci_cacheline_size(); 796 set_pci_dfl_cacheline_size();
786 return 0; 797 return 0;
787} 798}
788 799
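
The new messages use %pR, the kernel's printf extension for struct resource, and also report the bridge's address translation: when a window carries an offset, the PCI bus address is the CPU-side resource range minus that offset. The relation being printed, as a sketch:

        /* CPU (resource) window vs. PCI bus address, per the hunk above */
        u64 pci_start = window->resource.start - offset;
        u64 pci_end   = window->resource.end   - offset;
        /* %pR renders the resource, e.g. "[mem 0x80000000-0x8fffffff]" */
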
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index 305ac852bbed..d3c865c5a6ba 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -76,30 +76,6 @@ asmlinkage int sys_tas(int __user *addr)
76 return oldval; 76 return oldval;
77} 77}
78 78
79asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
80 unsigned long prot, unsigned long flags,
81 unsigned long fd, unsigned long pgoff)
82{
83 int error = -EBADF;
84 struct file *file = NULL;
85
86 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
87 if (!(flags & MAP_ANONYMOUS)) {
88 file = fget(fd);
89 if (!file)
90 goto out;
91 }
92
93 down_write(&current->mm->mmap_sem);
94 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
95 up_write(&current->mm->mmap_sem);
96
97 if (file)
98 fput(file);
99out:
100 return error;
101}
102
103/* 79/*
104 * sys_ipc() is the de-multiplexer for the SysV IPC calls.. 80 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
105 * 81 *
diff --git a/arch/m32r/kernel/syscall_table.S b/arch/m32r/kernel/syscall_table.S
index aa3bf4cfab37..60536e271233 100644
--- a/arch/m32r/kernel/syscall_table.S
+++ b/arch/m32r/kernel/syscall_table.S
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
191 .long sys_ni_syscall /* streams2 */ 191 .long sys_ni_syscall /* streams2 */
192 .long sys_vfork /* 190 */ 192 .long sys_vfork /* 190 */
193 .long sys_getrlimit 193 .long sys_getrlimit
194 .long sys_mmap2 194 .long sys_mmap_pgoff
195 .long sys_truncate64 195 .long sys_truncate64
196 .long sys_ftruncate64 196 .long sys_ftruncate64
197 .long sys_stat64 /* 195 */ 197 .long sys_stat64 /* 195 */
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 7deb402bfc75..218f441de667 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -29,37 +29,16 @@
29#include <asm/page.h> 29#include <asm/page.h>
30#include <asm/unistd.h> 30#include <asm/unistd.h>
31 31
32/* common code for old and new mmaps */
33static inline long do_mmap2(
34 unsigned long addr, unsigned long len,
35 unsigned long prot, unsigned long flags,
36 unsigned long fd, unsigned long pgoff)
37{
38 int error = -EBADF;
39 struct file * file = NULL;
40
41 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
42 if (!(flags & MAP_ANONYMOUS)) {
43 file = fget(fd);
44 if (!file)
45 goto out;
46 }
47
48 down_write(&current->mm->mmap_sem);
49 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
50 up_write(&current->mm->mmap_sem);
51
52 if (file)
53 fput(file);
54out:
55 return error;
56}
57
58asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 32asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
59 unsigned long prot, unsigned long flags, 33 unsigned long prot, unsigned long flags,
60 unsigned long fd, unsigned long pgoff) 34 unsigned long fd, unsigned long pgoff)
61{ 35{
62 return do_mmap2(addr, len, prot, flags, fd, pgoff); 36 /*
37 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
38 * so we need to shift the argument down by 1; m68k mmap64(3)
39 * (in libc) expects the last argument of mmap2 in 4Kb units.
40 */
41 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
63} 42}
64 43
65/* 44/*
@@ -90,57 +69,11 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
90 if (a.offset & ~PAGE_MASK) 69 if (a.offset & ~PAGE_MASK)
91 goto out; 70 goto out;
92 71
93 a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 72 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
94 73 a.offset >> PAGE_SHIFT);
95 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
96out:
97 return error;
98}
99
100#if 0
101struct mmap_arg_struct64 {
102 __u32 addr;
103 __u32 len;
104 __u32 prot;
105 __u32 flags;
106 __u64 offset; /* 64 bits */
107 __u32 fd;
108};
109
110asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
111{
112 int error = -EFAULT;
113 struct file * file = NULL;
114 struct mmap_arg_struct64 a;
115 unsigned long pgoff;
116
117 if (copy_from_user(&a, arg, sizeof(a)))
118 return -EFAULT;
119
120 if ((long)a.offset & ~PAGE_MASK)
121 return -EINVAL;
122
123 pgoff = a.offset >> PAGE_SHIFT;
124 if ((a.offset >> PAGE_SHIFT) != pgoff)
125 return -EINVAL;
126
127 if (!(a.flags & MAP_ANONYMOUS)) {
128 error = -EBADF;
129 file = fget(a.fd);
130 if (!file)
131 goto out;
132 }
133 a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
134
135 down_write(&current->mm->mmap_sem);
136 error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
137 up_write(&current->mm->mmap_sem);
138 if (file)
139 fput(file);
140out: 74out:
141 return error; 75 return error;
142} 76}
143#endif
144 77
145struct sel_arg_struct { 78struct sel_arg_struct {
146 unsigned long n; 79 unsigned long n;
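
The new m68k comment flags the sun3 hazard without fixing it: with 8KB pages, mmap2's 4KB-unit argument would need to be halved before reaching sys_mmap_pgoff(). The rescale it implies would follow the frv/parisc pattern, roughly (illustrative only, not what this hunk applies):

        if (pgoff & ((1 << (PAGE_SHIFT - 12)) - 1))
                return -EINVAL;
        return sys_mmap_pgoff(addr, len, prot, flags, fd,
                              pgoff >> (PAGE_SHIFT - 12));
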
diff --git a/arch/m68knommu/kernel/sys_m68k.c b/arch/m68knommu/kernel/sys_m68k.c
index efdd090778a3..b67cbc735a9b 100644
--- a/arch/m68knommu/kernel/sys_m68k.c
+++ b/arch/m68knommu/kernel/sys_m68k.c
@@ -27,39 +27,6 @@
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/unistd.h> 28#include <asm/unistd.h>
29 29
30/* common code for old and new mmaps */
31static inline long do_mmap2(
32 unsigned long addr, unsigned long len,
33 unsigned long prot, unsigned long flags,
34 unsigned long fd, unsigned long pgoff)
35{
36 int error = -EBADF;
37 struct file * file = NULL;
38
39 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
40 if (!(flags & MAP_ANONYMOUS)) {
41 file = fget(fd);
42 if (!file)
43 goto out;
44 }
45
46 down_write(&current->mm->mmap_sem);
47 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
48 up_write(&current->mm->mmap_sem);
49
50 if (file)
51 fput(file);
52out:
53 return error;
54}
55
56asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
57 unsigned long prot, unsigned long flags,
58 unsigned long fd, unsigned long pgoff)
59{
60 return do_mmap2(addr, len, prot, flags, fd, pgoff);
61}
62
63/* 30/*
64 * Perform the select(nd, in, out, ex, tv) and mmap() system 31 * Perform the select(nd, in, out, ex, tv) and mmap() system
65 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to 32 * calls. Linux/m68k cloned Linux/i386, which didn't use to be able to
@@ -88,9 +55,8 @@ asmlinkage int old_mmap(struct mmap_arg_struct *arg)
88 if (a.offset & ~PAGE_MASK) 55 if (a.offset & ~PAGE_MASK)
89 goto out; 56 goto out;
90 57
91 a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 58 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
92 59 a.offset >> PAGE_SHIFT);
93 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
94out: 60out:
95 return error; 61 return error;
96} 62}
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 23535cc415ae..486837efa3d7 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -210,7 +210,7 @@ ENTRY(sys_call_table)
210 .long sys_ni_syscall /* streams2 */ 210 .long sys_ni_syscall /* streams2 */
211 .long sys_vfork /* 190 */ 211 .long sys_vfork /* 190 */
212 .long sys_getrlimit 212 .long sys_getrlimit
213 .long sys_mmap2 213 .long sys_mmap_pgoff
214 .long sys_truncate64 214 .long sys_truncate64
215 .long sys_ftruncate64 215 .long sys_ftruncate64
216 .long sys_stat64 /* 195 */ 216 .long sys_stat64 /* 195 */
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
index 07cabed4b947..9f3c205fb75b 100644
--- a/arch/microblaze/kernel/sys_microblaze.c
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -62,46 +62,14 @@ out:
62 return error; 62 return error;
63} 63}
64 64
65asmlinkage long
66sys_mmap2(unsigned long addr, unsigned long len,
67 unsigned long prot, unsigned long flags,
68 unsigned long fd, unsigned long pgoff)
69{
70 struct file *file = NULL;
71 int ret = -EBADF;
72
73 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
74 if (!(flags & MAP_ANONYMOUS)) {
75 file = fget(fd);
76 if (!file) {
77 printk(KERN_INFO "no fd in mmap\r\n");
78 goto out;
79 }
80 }
81
82 down_write(&current->mm->mmap_sem);
83 ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
84 up_write(&current->mm->mmap_sem);
85 if (file)
86 fput(file);
87out:
88 return ret;
89}
90
91asmlinkage long sys_mmap(unsigned long addr, unsigned long len, 65asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
92 unsigned long prot, unsigned long flags, 66 unsigned long prot, unsigned long flags,
93 unsigned long fd, off_t pgoff) 67 unsigned long fd, off_t pgoff)
94{ 68{
95 int err = -EINVAL; 69 if (pgoff & ~PAGE_MASK)
96 70 return -EINVAL;
97 if (pgoff & ~PAGE_MASK) {
98 printk(KERN_INFO "no pagemask in mmap\r\n");
99 goto out;
100 }
101 71
102 err = sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); 72 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
103out:
104 return err;
105} 73}
106 74
107/* 75/*
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index c1ab1dc10898..b96f365ea6b1 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -196,7 +196,7 @@ ENTRY(sys_call_table)
196 .long sys_ni_syscall /* reserved for streams2 */ 196 .long sys_ni_syscall /* reserved for streams2 */
197 .long sys_vfork /* 190 */ 197 .long sys_vfork /* 190 */
198 .long sys_getrlimit 198 .long sys_getrlimit
199 .long sys_mmap2 /* mmap2 */ 199 .long sys_mmap_pgoff /* mmap2 */
200 .long sys_truncate64 200 .long sys_truncate64
201 .long sys_ftruncate64 201 .long sys_ftruncate64
202 .long sys_stat64 /* 195 */ 202 .long sys_stat64 /* 195 */
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 1a2793efdc4e..f042563c924f 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -67,28 +67,13 @@ SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
67 unsigned long, prot, unsigned long, flags, unsigned long, fd, 67 unsigned long, prot, unsigned long, flags, unsigned long, fd,
68 unsigned long, pgoff) 68 unsigned long, pgoff)
69{ 69{
70 struct file * file = NULL;
71 unsigned long error; 70 unsigned long error;
72 71
73 error = -EINVAL; 72 error = -EINVAL;
74 if (pgoff & (~PAGE_MASK >> 12)) 73 if (pgoff & (~PAGE_MASK >> 12))
75 goto out; 74 goto out;
76 pgoff >>= PAGE_SHIFT-12; 75 error = sys_mmap_pgoff(addr, len, prot, flags, fd,
77 76 pgoff >> (PAGE_SHIFT-12));
78 if (!(flags & MAP_ANONYMOUS)) {
79 error = -EBADF;
80 file = fget(fd);
81 if (!file)
82 goto out;
83 }
84 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
85
86 down_write(&current->mm->mmap_sem);
87 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
88 up_write(&current->mm->mmap_sem);
89 if (file)
90 fput(file);
91
92out: 77out:
93 return error; 78 return error;
94} 79}
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index fe0d79805603..3f7f466190b4 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -93,7 +93,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
93 * We do not accept a shared mapping if it would violate 93 * We do not accept a shared mapping if it would violate
94 * cache aliasing constraints. 94 * cache aliasing constraints.
95 */ 95 */
96 if ((flags & MAP_SHARED) && (addr & shm_align_mask)) 96 if ((flags & MAP_SHARED) &&
97 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
97 return -EINVAL; 98 return -EINVAL;
98 return addr; 99 return addr;
99 } 100 }
@@ -129,31 +130,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
129 } 130 }
130} 131}
131 132
132/* common code for old and new mmaps */
133static inline unsigned long
134do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
135 unsigned long flags, unsigned long fd, unsigned long pgoff)
136{
137 unsigned long error = -EBADF;
138 struct file * file = NULL;
139
140 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
141 if (!(flags & MAP_ANONYMOUS)) {
142 file = fget(fd);
143 if (!file)
144 goto out;
145 }
146
147 down_write(&current->mm->mmap_sem);
148 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
149 up_write(&current->mm->mmap_sem);
150
151 if (file)
152 fput(file);
153out:
154 return error;
155}
156
157SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, 133SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
158 unsigned long, prot, unsigned long, flags, unsigned long, 134 unsigned long, prot, unsigned long, flags, unsigned long,
159 fd, off_t, offset) 135 fd, off_t, offset)
@@ -164,7 +140,7 @@ SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
164 if (offset & ~PAGE_MASK) 140 if (offset & ~PAGE_MASK)
165 goto out; 141 goto out;
166 142
167 result = do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); 143 result = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
168 144
169out: 145out:
170 return result; 146 return result;
@@ -177,7 +153,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
177 if (pgoff & (~PAGE_MASK >> 12)) 153 if (pgoff & (~PAGE_MASK >> 12))
178 return -EINVAL; 154 return -EINVAL;
179 155
180 return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12)); 156 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT-12));
181} 157}
182 158
183save_static_function(sys_fork); 159save_static_function(sys_fork);
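
The aliasing check above is deliberately relaxed: a shared MAP_FIXED mapping no longer needs addr itself aligned to the colour boundary, only a colour consistent with the file offset, so every mapping of a given file page lands on one cache colour. Because shm_align_mask is a power of two minus one, the subtraction test is the same as comparing colours directly, as in this sketch:

        static int shared_colour_ok(unsigned long addr, unsigned long pgoff,
                                    unsigned long flags)
        {
                unsigned long colour_addr = addr & shm_align_mask;
                unsigned long colour_off  = (pgoff << PAGE_SHIFT) &
                                            shm_align_mask;

                /* equivalent to !((addr - (pgoff << PAGE_SHIFT)) & mask) */
                return !(flags & MAP_SHARED) || colour_addr == colour_off;
        }
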
diff --git a/arch/mn10300/include/asm/mman.h b/arch/mn10300/include/asm/mman.h
index 8eebf89f5ab1..db5c53da73ce 100644
--- a/arch/mn10300/include/asm/mman.h
+++ b/arch/mn10300/include/asm/mman.h
@@ -1 +1,6 @@
1#include <asm-generic/mman.h> 1#include <asm-generic/mman.h>
2
3#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
4
5#define arch_mmap_check(addr, len, flags) \
6 (((flags) & MAP_FIXED && (addr) < MIN_MAP_ADDR) ? -EINVAL : 0)
diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S
index a94e7ea3faa6..c9ee6c009d79 100644
--- a/arch/mn10300/kernel/entry.S
+++ b/arch/mn10300/kernel/entry.S
@@ -578,7 +578,7 @@ ENTRY(sys_call_table)
578 .long sys_ni_syscall /* reserved for streams2 */ 578 .long sys_ni_syscall /* reserved for streams2 */
579 .long sys_vfork /* 190 */ 579 .long sys_vfork /* 190 */
580 .long sys_getrlimit 580 .long sys_getrlimit
581 .long sys_mmap2 581 .long sys_mmap_pgoff
582 .long sys_truncate64 582 .long sys_truncate64
583 .long sys_ftruncate64 583 .long sys_ftruncate64
584 .long sys_stat64 /* 195 */ 584 .long sys_stat64 /* 195 */
diff --git a/arch/mn10300/kernel/sys_mn10300.c b/arch/mn10300/kernel/sys_mn10300.c
index 8ca5af00334c..17cc6ce04e84 100644
--- a/arch/mn10300/kernel/sys_mn10300.c
+++ b/arch/mn10300/kernel/sys_mn10300.c
@@ -23,47 +23,13 @@
23 23
24#include <asm/uaccess.h> 24#include <asm/uaccess.h>
25 25
26#define MIN_MAP_ADDR PAGE_SIZE /* minimum fixed mmap address */
27
28/*
29 * memory mapping syscall
30 */
31asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
32 unsigned long prot, unsigned long flags,
33 unsigned long fd, unsigned long pgoff)
34{
35 struct file *file = NULL;
36 long error = -EINVAL;
37
38 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
39
40 if (flags & MAP_FIXED && addr < MIN_MAP_ADDR)
41 goto out;
42
43 error = -EBADF;
44 if (!(flags & MAP_ANONYMOUS)) {
45 file = fget(fd);
46 if (!file)
47 goto out;
48 }
49
50 down_write(&current->mm->mmap_sem);
51 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
52 up_write(&current->mm->mmap_sem);
53
54 if (file)
55 fput(file);
56out:
57 return error;
58}
59
60asmlinkage long old_mmap(unsigned long addr, unsigned long len, 26asmlinkage long old_mmap(unsigned long addr, unsigned long len,
61 unsigned long prot, unsigned long flags, 27 unsigned long prot, unsigned long flags,
62 unsigned long fd, unsigned long offset) 28 unsigned long fd, unsigned long offset)
63{ 29{
64 if (offset & ~PAGE_MASK) 30 if (offset & ~PAGE_MASK)
65 return -EINVAL; 31 return -EINVAL;
66 return sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); 32 return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
67} 33}
68 34
69struct sel_arg_struct { 35struct sel_arg_struct {
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index 71b31957c8f1..9147391afb03 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -110,37 +110,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
110 return addr; 110 return addr;
111} 111}
112 112
113static unsigned long do_mmap2(unsigned long addr, unsigned long len,
114 unsigned long prot, unsigned long flags, unsigned long fd,
115 unsigned long pgoff)
116{
117 struct file * file = NULL;
118 unsigned long error = -EBADF;
119 if (!(flags & MAP_ANONYMOUS)) {
120 file = fget(fd);
121 if (!file)
122 goto out;
123 }
124
125 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
126
127 down_write(&current->mm->mmap_sem);
128 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
129 up_write(&current->mm->mmap_sem);
130
131 if (file != NULL)
132 fput(file);
133out:
134 return error;
135}
136
137asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, 113asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
138 unsigned long prot, unsigned long flags, unsigned long fd, 114 unsigned long prot, unsigned long flags, unsigned long fd,
139 unsigned long pgoff) 115 unsigned long pgoff)
140{ 116{
141 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE 117 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
142 we have. */ 118 we have. */
143 return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); 119 return sys_mmap_pgoff(addr, len, prot, flags, fd,
120 pgoff >> (PAGE_SHIFT - 12));
144} 121}
145 122
146asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, 123asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
@@ -148,7 +125,8 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
148 unsigned long offset) 125 unsigned long offset)
149{ 126{
150 if (!(offset & ~PAGE_MASK)) { 127 if (!(offset & ~PAGE_MASK)) {
151 return do_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT); 128 return sys_mmap_pgoff(addr, len, prot, flags, fd,
129 offset >> PAGE_SHIFT);
152 } else { 130 } else {
153 return -EINVAL; 131 return -EINVAL;
154 } 132 }
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index c04832c4a02e..3370e62e43d4 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -140,7 +140,6 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
140 unsigned long prot, unsigned long flags, 140 unsigned long prot, unsigned long flags,
141 unsigned long fd, unsigned long off, int shift) 141 unsigned long fd, unsigned long off, int shift)
142{ 142{
143 struct file * file = NULL;
144 unsigned long ret = -EINVAL; 143 unsigned long ret = -EINVAL;
145 144
146 if (!arch_validate_prot(prot)) 145 if (!arch_validate_prot(prot))
@@ -151,20 +150,8 @@ static inline unsigned long do_mmap2(unsigned long addr, size_t len,
151 goto out; 150 goto out;
152 off >>= shift; 151 off >>= shift;
153 } 152 }
154
155 ret = -EBADF;
156 if (!(flags & MAP_ANONYMOUS)) {
157 if (!(file = fget(fd)))
158 goto out;
159 }
160
161 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
162 153
163 down_write(&current->mm->mmap_sem); 154 ret = sys_mmap_pgoff(addr, len, prot, flags, fd, off);
164 ret = do_mmap_pgoff(file, addr, len, prot, flags, off);
165 up_write(&current->mm->mmap_sem);
166 if (file)
167 fput(file);
168out: 155out:
169 return ret; 156 return ret;
170} 157}
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 25c31d681402..22c9e557bb22 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -624,38 +624,6 @@ struct mmap_arg_struct_emu31 {
624 u32 offset; 624 u32 offset;
625}; 625};
626 626
627/* common code for old and new mmaps */
628static inline long do_mmap2(
629 unsigned long addr, unsigned long len,
630 unsigned long prot, unsigned long flags,
631 unsigned long fd, unsigned long pgoff)
632{
633 struct file * file = NULL;
634 unsigned long error = -EBADF;
635
636 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
637 if (!(flags & MAP_ANONYMOUS)) {
638 file = fget(fd);
639 if (!file)
640 goto out;
641 }
642
643 down_write(&current->mm->mmap_sem);
644 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
645 if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
646 /* Result is out of bounds. */
647 do_munmap(current->mm, addr, len);
648 error = -ENOMEM;
649 }
650 up_write(&current->mm->mmap_sem);
651
652 if (file)
653 fput(file);
654out:
655 return error;
656}
657
658
659asmlinkage unsigned long 627asmlinkage unsigned long
660old32_mmap(struct mmap_arg_struct_emu31 __user *arg) 628old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
661{ 629{
@@ -669,7 +637,8 @@ old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
669 if (a.offset & ~PAGE_MASK) 637 if (a.offset & ~PAGE_MASK)
670 goto out; 638 goto out;
671 639
672 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); 640 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
641 a.offset >> PAGE_SHIFT);
673out: 642out:
674 return error; 643 return error;
675} 644}
@@ -682,7 +651,7 @@ sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
682 651
683 if (copy_from_user(&a, arg, sizeof(a))) 652 if (copy_from_user(&a, arg, sizeof(a)))
684 goto out; 653 goto out;
685 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); 654 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
686out: 655out:
687 return error; 656 return error;
688} 657}
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index e9d94f61d500..86a74c9c9e63 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -32,32 +32,6 @@
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include "entry.h" 33#include "entry.h"
34 34
35/* common code for old and new mmaps */
36static inline long do_mmap2(
37 unsigned long addr, unsigned long len,
38 unsigned long prot, unsigned long flags,
39 unsigned long fd, unsigned long pgoff)
40{
41 long error = -EBADF;
42 struct file * file = NULL;
43
44 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
45 if (!(flags & MAP_ANONYMOUS)) {
46 file = fget(fd);
47 if (!file)
48 goto out;
49 }
50
51 down_write(&current->mm->mmap_sem);
52 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
53 up_write(&current->mm->mmap_sem);
54
55 if (file)
56 fput(file);
57out:
58 return error;
59}
60
61/* 35/*
62 * Perform the select(nd, in, out, ex, tv) and mmap() system 36 * Perform the select(nd, in, out, ex, tv) and mmap() system
63 * calls. Linux for S/390 isn't able to handle more than 5 37 * calls. Linux for S/390 isn't able to handle more than 5
@@ -81,7 +55,7 @@ SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg)
81 55
82 if (copy_from_user(&a, arg, sizeof(a))) 56 if (copy_from_user(&a, arg, sizeof(a)))
83 goto out; 57 goto out;
84 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); 58 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
85out: 59out:
86 return error; 60 return error;
87} 61}
@@ -98,7 +72,7 @@ SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
98 if (a.offset & ~PAGE_MASK) 72 if (a.offset & ~PAGE_MASK)
99 goto out; 73 goto out;
100 74
101 error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); 75 error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
102out: 76out:
103 return error; 77 return error;
104} 78}
diff --git a/arch/score/kernel/sys_score.c b/arch/score/kernel/sys_score.c
index 001249469866..856ed68a58e6 100644
--- a/arch/score/kernel/sys_score.c
+++ b/arch/score/kernel/sys_score.c
@@ -36,34 +36,16 @@ asmlinkage long
36sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, 36sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
37 unsigned long flags, unsigned long fd, unsigned long pgoff) 37 unsigned long flags, unsigned long fd, unsigned long pgoff)
38{ 38{
39 int error = -EBADF; 39 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
40 struct file *file = NULL;
41
42 if (pgoff & (~PAGE_MASK >> 12))
43 return -EINVAL;
44
45 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
46 if (!(flags & MAP_ANONYMOUS)) {
47 file = fget(fd);
48 if (!file)
49 return error;
50 }
51
52 down_write(&current->mm->mmap_sem);
53 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
54 up_write(&current->mm->mmap_sem);
55
56 if (file)
57 fput(file);
58
59 return error;
60} 40}
61 41
62asmlinkage long 42asmlinkage long
63sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, 43sys_mmap(unsigned long addr, unsigned long len, unsigned long prot,
64 unsigned long flags, unsigned long fd, off_t pgoff) 44 unsigned long flags, unsigned long fd, off_t offset)
65{ 45{
66 return sys_mmap2(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT); 46 if (unlikely(offset & ~PAGE_MASK))
47 return -EINVAL;
48 return sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
67} 49}
68 50
69asmlinkage long 51asmlinkage long
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 8aa5d1ceaf14..71399cde03b5 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -28,37 +28,13 @@
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/cachectl.h> 29#include <asm/cachectl.h>
30 30
31static inline long
32do_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
33 unsigned long flags, int fd, unsigned long pgoff)
34{
35 int error = -EBADF;
36 struct file *file = NULL;
37
38 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
39 if (!(flags & MAP_ANONYMOUS)) {
40 file = fget(fd);
41 if (!file)
42 goto out;
43 }
44
45 down_write(&current->mm->mmap_sem);
46 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
47 up_write(&current->mm->mmap_sem);
48
49 if (file)
50 fput(file);
51out:
52 return error;
53}
54
55asmlinkage int old_mmap(unsigned long addr, unsigned long len, 31asmlinkage int old_mmap(unsigned long addr, unsigned long len,
56 unsigned long prot, unsigned long flags, 32 unsigned long prot, unsigned long flags,
57 int fd, unsigned long off) 33 int fd, unsigned long off)
58{ 34{
59 if (off & ~PAGE_MASK) 35 if (off & ~PAGE_MASK)
60 return -EINVAL; 36 return -EINVAL;
61 return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT); 37 return sys_mmap_pgoff(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
62} 38}
63 39
64asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, 40asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
@@ -74,7 +50,7 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
74 50
75 pgoff >>= PAGE_SHIFT - 12; 51 pgoff >>= PAGE_SHIFT - 12;
76 52
77 return do_mmap2(addr, len, prot, flags, fd, pgoff); 53 return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
78} 54}
79 55
80/* 56/*
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index d2984fa42d3d..afeb710ec5c3 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -54,7 +54,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
54 /* We do not accept a shared mapping if it would violate 54 /* We do not accept a shared mapping if it would violate
55 * cache aliasing constraints. 55 * cache aliasing constraints.
56 */ 56 */
57 if ((flags & MAP_SHARED) && (addr & shm_align_mask)) 57 if ((flags & MAP_SHARED) &&
58 ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
58 return -EINVAL; 59 return -EINVAL;
59 return addr; 60 return addr;
60 } 61 }
diff --git a/arch/sparc/include/asm/pci_64.h b/arch/sparc/include/asm/pci_64.h
index b63e51c3c3ee..b0576df6ec83 100644
--- a/arch/sparc/include/asm/pci_64.h
+++ b/arch/sparc/include/asm/pci_64.h
@@ -16,8 +16,6 @@
16 16
17#define PCI_IRQ_NONE 0xffffffff 17#define PCI_IRQ_NONE 0xffffffff
18 18
19#define PCI_CACHE_LINE_BYTES 64
20
21static inline void pcibios_set_master(struct pci_dev *dev) 19static inline void pcibios_set_master(struct pci_dev *dev)
22{ 20{
23 /* No special bus mastering setup handling */ 21 /* No special bus mastering setup handling */
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index c68648662802..b85374f7cf94 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -1081,3 +1081,10 @@ void pci_resource_to_user(const struct pci_dev *pdev, int bar,
1081 *start = rp->start - offset; 1081 *start = rp->start - offset;
1082 *end = rp->end - offset; 1082 *end = rp->end - offset;
1083} 1083}
1084
1085static int __init pcibios_init(void)
1086{
1087 pci_dfl_cache_line_size = 64 >> 2;
1088 return 0;
1089}
1090subsys_initcall(pcibios_init);
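
pci_dfl_cache_line_size, like the config-space CACHE_LINE_SIZE register it seeds, is counted in 32-bit dwords, hence 64 >> 2 == 16 for sparc64's 64-byte lines; the ia64 hunk earlier derives the same unit from PAL cache info:

        /* dwords, not bytes: a 64-byte line is 64 / sizeof(u32) = 16 */
        pci_dfl_cache_line_size = 64 >> 2;
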
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 00abe87e5b51..dc0ac197e7e2 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -564,28 +564,6 @@ asmlinkage long sparc32_open(const char __user *filename,
564 return do_sys_open(AT_FDCWD, filename, flags, mode); 564 return do_sys_open(AT_FDCWD, filename, flags, mode);
565} 565}
566 566
567extern unsigned long do_mremap(unsigned long addr,
568 unsigned long old_len, unsigned long new_len,
569 unsigned long flags, unsigned long new_addr);
570
571asmlinkage unsigned long sys32_mremap(unsigned long addr,
572 unsigned long old_len, unsigned long new_len,
573 unsigned long flags, u32 __new_addr)
574{
575 unsigned long ret = -EINVAL;
576 unsigned long new_addr = __new_addr;
577
578 if (unlikely(sparc_mmap_check(addr, old_len)))
579 goto out;
580 if (unlikely(sparc_mmap_check(new_addr, new_len)))
581 goto out;
582 down_write(&current->mm->mmap_sem);
583 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
584 up_write(&current->mm->mmap_sem);
585out:
586 return ret;
587}
588
589long sys32_lookup_dcookie(unsigned long cookie_high, 567long sys32_lookup_dcookie(unsigned long cookie_high,
590 unsigned long cookie_low, 568 unsigned long cookie_low,
591 char __user *buf, size_t len) 569 char __user *buf, size_t len)
diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
index 03035c852a43..3a82e65d8db2 100644
--- a/arch/sparc/kernel/sys_sparc_32.c
+++ b/arch/sparc/kernel/sys_sparc_32.c
@@ -45,7 +45,8 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
45 /* We do not accept a shared mapping if it would violate 45 /* We do not accept a shared mapping if it would violate
46 * cache aliasing constraints. 46 * cache aliasing constraints.
47 */ 47 */
48 if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1))) 48 if ((flags & MAP_SHARED) &&
49 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
49 return -EINVAL; 50 return -EINVAL;
50 return addr; 51 return addr;
51 } 52 }
@@ -79,15 +80,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
79 } 80 }
80} 81}
81 82
82asmlinkage unsigned long sparc_brk(unsigned long brk)
83{
84 if(ARCH_SUN4C) {
85 if ((brk & 0xe0000000) != (current->mm->brk & 0xe0000000))
86 return current->mm->brk;
87 }
88 return sys_brk(brk);
89}
90
91/* 83/*
92 * sys_pipe() is the normal C calling standard for creating 84 * sys_pipe() is the normal C calling standard for creating
93 * a pipe. It's not the way unix traditionally does this, though. 85 * a pipe. It's not the way unix traditionally does this, though.
@@ -234,31 +226,6 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
234} 226}
235 227
236/* Linux version of mmap */ 228/* Linux version of mmap */
237static unsigned long do_mmap2(unsigned long addr, unsigned long len,
238 unsigned long prot, unsigned long flags, unsigned long fd,
239 unsigned long pgoff)
240{
241 struct file * file = NULL;
242 unsigned long retval = -EBADF;
243
244 if (!(flags & MAP_ANONYMOUS)) {
245 file = fget(fd);
246 if (!file)
247 goto out;
248 }
249
250 len = PAGE_ALIGN(len);
251 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
252
253 down_write(&current->mm->mmap_sem);
254 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
255 up_write(&current->mm->mmap_sem);
256
257 if (file)
258 fput(file);
259out:
260 return retval;
261}
262 229
263asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, 230asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
264 unsigned long prot, unsigned long flags, unsigned long fd, 231 unsigned long prot, unsigned long flags, unsigned long fd,
@@ -266,14 +233,16 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
266{ 233{
267 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE 234 /* Make sure the shift for mmap2 is constant (12), no matter what PAGE_SIZE
268 we have. */ 235 we have. */
269 return do_mmap2(addr, len, prot, flags, fd, pgoff >> (PAGE_SHIFT - 12)); 236 return sys_mmap_pgoff(addr, len, prot, flags, fd,
237 pgoff >> (PAGE_SHIFT - 12));
270} 238}
271 239
272asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, 240asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
273 unsigned long prot, unsigned long flags, unsigned long fd, 241 unsigned long prot, unsigned long flags, unsigned long fd,
274 unsigned long off) 242 unsigned long off)
275{ 243{
276 return do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); 244 /* no alignment check? */
245 return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
277} 246}
278 247
279long sparc_remap_file_pages(unsigned long start, unsigned long size, 248long sparc_remap_file_pages(unsigned long start, unsigned long size,
@@ -287,27 +256,6 @@ long sparc_remap_file_pages(unsigned long start, unsigned long size,
287 (pgoff >> (PAGE_SHIFT - 12)), flags); 256 (pgoff >> (PAGE_SHIFT - 12)), flags);
288} 257}
289 258
290extern unsigned long do_mremap(unsigned long addr,
291 unsigned long old_len, unsigned long new_len,
292 unsigned long flags, unsigned long new_addr);
293
294asmlinkage unsigned long sparc_mremap(unsigned long addr,
295 unsigned long old_len, unsigned long new_len,
296 unsigned long flags, unsigned long new_addr)
297{
298 unsigned long ret = -EINVAL;
299
300 if (unlikely(sparc_mmap_check(addr, old_len)))
301 goto out;
302 if (unlikely(sparc_mmap_check(new_addr, new_len)))
303 goto out;
304 down_write(&current->mm->mmap_sem);
305 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
306 up_write(&current->mm->mmap_sem);
307out:
308 return ret;
309}
310
311/* we come to here via sys_nis_syscall so it can setup the regs argument */ 259/* we come to here via sys_nis_syscall so it can setup the regs argument */
312asmlinkage unsigned long 260asmlinkage unsigned long
313c_sys_nis_syscall (struct pt_regs *regs) 261c_sys_nis_syscall (struct pt_regs *regs)
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index e2d102447a43..cfa0e19abe3b 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -317,10 +317,14 @@ bottomup:
317unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) 317unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
318{ 318{
319 unsigned long align_goal, addr = -ENOMEM; 319 unsigned long align_goal, addr = -ENOMEM;
320 unsigned long (*get_area)(struct file *, unsigned long,
321 unsigned long, unsigned long, unsigned long);
322
323 get_area = current->mm->get_unmapped_area;
320 324
321 if (flags & MAP_FIXED) { 325 if (flags & MAP_FIXED) {
322 /* Ok, don't mess with it. */ 326 /* Ok, don't mess with it. */
323 return get_unmapped_area(NULL, orig_addr, len, pgoff, flags); 327 return get_area(NULL, orig_addr, len, pgoff, flags);
324 } 328 }
325 flags &= ~MAP_SHARED; 329 flags &= ~MAP_SHARED;
326 330
@@ -333,7 +337,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
333 align_goal = (64UL * 1024); 337 align_goal = (64UL * 1024);
334 338
335 do { 339 do {
336 addr = get_unmapped_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); 340 addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
337 if (!(addr & ~PAGE_MASK)) { 341 if (!(addr & ~PAGE_MASK)) {
338 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL); 342 addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
339 break; 343 break;
@@ -351,7 +355,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
351 * be obtained. 355 * be obtained.
352 */ 356 */
353 if (addr & ~PAGE_MASK) 357 if (addr & ~PAGE_MASK)
354 addr = get_unmapped_area(NULL, orig_addr, len, pgoff, flags); 358 addr = get_area(NULL, orig_addr, len, pgoff, flags);
355 359
356 return addr; 360 return addr;
357} 361}
@@ -399,18 +403,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
399 } 403 }
400} 404}
401 405
402SYSCALL_DEFINE1(sparc_brk, unsigned long, brk)
403{
404 /* People could try to be nasty and use ta 0x6d in 32bit programs */
405 if (test_thread_flag(TIF_32BIT) && brk >= STACK_TOP32)
406 return current->mm->brk;
407
408 if (unlikely(straddles_64bit_va_hole(current->mm->brk, brk)))
409 return current->mm->brk;
410
411 return sys_brk(brk);
412}
413
414/* 406/*
415 * sys_pipe() is the normal C calling standard for creating 407 * sys_pipe() is the normal C calling standard for creating
416 * a pipe. It's not the way unix traditionally does this, though. 408 * a pipe. It's not the way unix traditionally does this, though.
@@ -568,23 +560,13 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
568 unsigned long, prot, unsigned long, flags, unsigned long, fd, 560 unsigned long, prot, unsigned long, flags, unsigned long, fd,
569 unsigned long, off) 561 unsigned long, off)
570{ 562{
571 struct file * file = NULL; 563 unsigned long retval = -EINVAL;
572 unsigned long retval = -EBADF;
573
574 if (!(flags & MAP_ANONYMOUS)) {
575 file = fget(fd);
576 if (!file)
577 goto out;
578 }
579 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
580 len = PAGE_ALIGN(len);
581 564
582 down_write(&current->mm->mmap_sem); 565 if ((off + PAGE_ALIGN(len)) < off)
583 retval = do_mmap(file, addr, len, prot, flags, off); 566 goto out;
584 up_write(&current->mm->mmap_sem); 567 if (off & ~PAGE_MASK)
585 568 goto out;
586 if (file) 569 retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
587 fput(file);
588out: 570out:
589 return retval; 571 return retval;
590} 572}
@@ -614,12 +596,6 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
614 596
615 if (test_thread_flag(TIF_32BIT)) 597 if (test_thread_flag(TIF_32BIT))
616 goto out; 598 goto out;
617 if (unlikely(new_len >= VA_EXCLUDE_START))
618 goto out;
619 if (unlikely(sparc_mmap_check(addr, old_len)))
620 goto out;
621 if (unlikely(sparc_mmap_check(new_addr, new_len)))
622 goto out;
623 599
624 down_write(&current->mm->mmap_sem); 600 down_write(&current->mm->mmap_sem);
625 ret = do_mremap(addr, old_len, new_len, flags, new_addr); 601 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
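
The rewritten sparc64 sys_mmap adds an overflow guard: in unsigned arithmetic, off + PAGE_ALIGN(len) < off exactly when the end of the mapping would wrap past the top of the address space. The predicate as a hypothetical helper:

        static int mmap_end_wraps(unsigned long off, unsigned long len)
        {
                /* true when off + PAGE_ALIGN(len) overflows unsigned long */
                return (off + PAGE_ALIGN(len)) < off;
        }
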
diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h
index a63c5d2d9849..d2f999ae2b85 100644
--- a/arch/sparc/kernel/systbls.h
+++ b/arch/sparc/kernel/systbls.h
@@ -9,7 +9,6 @@
9struct new_utsname; 9struct new_utsname;
10 10
11extern asmlinkage unsigned long sys_getpagesize(void); 11extern asmlinkage unsigned long sys_getpagesize(void);
12extern asmlinkage unsigned long sparc_brk(unsigned long brk);
13extern asmlinkage long sparc_pipe(struct pt_regs *regs); 12extern asmlinkage long sparc_pipe(struct pt_regs *regs);
14extern asmlinkage long sys_ipc(unsigned int call, int first, 13extern asmlinkage long sys_ipc(unsigned int call, int first,
15 unsigned long second, 14 unsigned long second,
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index ceb1530f8aa6..801fc8e5a0e8 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -19,7 +19,7 @@ sys_call_table:
19/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write 19/*0*/ .long sys_restart_syscall, sys_exit, sys_fork, sys_read, sys_write
20/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link 20/*5*/ .long sys_open, sys_close, sys_wait4, sys_creat, sys_link
21/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod 21/*10*/ .long sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
22/*15*/ .long sys_chmod, sys_lchown16, sparc_brk, sys_nis_syscall, sys_lseek 22/*15*/ .long sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
23/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 23/*20*/ .long sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
24/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause 24/*25*/ .long sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_pause
25/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice 25/*30*/ .long sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
@@ -67,7 +67,7 @@ sys_call_table:
67/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall 67/*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall
68/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler 68/*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler
69/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep 69/*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep
70/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl 70/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
71/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep 71/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
72/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun 72/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
73/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy 73/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index cc8e7862e95a..e575b46bd7a9 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@ sys_call_table32:
 /*0*/	.word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write
 /*5*/	.word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link
 /*10*/	.word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod
-/*15*/	.word sys_chmod, sys_lchown16, sys_sparc_brk, sys32_perfctr, sys32_lseek
+/*15*/	.word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
 /*25*/	.word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause
 /*30*/	.word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice
@@ -68,7 +68,7 @@ sys_call_table32:
 	.word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall
 /*240*/	.word sys_munlockall, sys32_sched_setparam, sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler
 	.word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep
-/*250*/	.word sys32_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
+/*250*/	.word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
 	.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
 /*260*/	.word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
 	.word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
@@ -96,7 +96,7 @@ sys_call_table:
 /*0*/	.word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
 /*5*/	.word sys_open, sys_close, sys_wait4, sys_creat, sys_link
 /*10*/	.word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod
-/*15*/	.word sys_chmod, sys_lchown, sys_sparc_brk, sys_perfctr, sys_lseek
+/*15*/	.word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek
 /*20*/	.word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid
 /*25*/	.word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall
 /*30*/	.word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index a4625c7b2bf9..cccab850c27e 100644
--- a/arch/um/kernel/syscall.c
+++ b/arch/um/kernel/syscall.c
@@ -8,6 +8,7 @@
 #include "linux/mm.h"
 #include "linux/sched.h"
 #include "linux/utsname.h"
+#include "linux/syscalls.h"
 #include "asm/current.h"
 #include "asm/mman.h"
 #include "asm/uaccess.h"
@@ -37,31 +38,6 @@ long sys_vfork(void)
 	return ret;
 }
 
-/* common code for old and new mmaps */
-long sys_mmap2(unsigned long addr, unsigned long len,
-	       unsigned long prot, unsigned long flags,
-	       unsigned long fd, unsigned long pgoff)
-{
-	long error = -EBADF;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
- out:
-	return error;
-}
-
 long old_mmap(unsigned long addr, unsigned long len,
 	      unsigned long prot, unsigned long flags,
 	      unsigned long fd, unsigned long offset)
@@ -70,7 +46,7 @@ long old_mmap(unsigned long addr, unsigned long len,
 	if (offset & ~PAGE_MASK)
 		goto out;
 
-	err = sys_mmap2(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+	err = sys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
 out:
 	return err;
 }
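For reference: every per-arch mmap wrapper deleted in this series collapses into one shared helper. The following is a minimal sketch of that helper, reconstructed from the removed copies above; the real definition lands in generic mm code outside this diff, and any detail beyond what the removed code shows is an assumption.

	/* Sketch only, not part of this patch: shape of the consolidated
	 * helper that the arch wrappers now delegate to. */
	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);
			if (!file)
				goto out;
		}

		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}

Callers such as old_mmap() above only validate the byte offset and shift it into pages before delegating.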
diff --git a/arch/um/sys-i386/shared/sysdep/syscalls.h b/arch/um/sys-i386/shared/sysdep/syscalls.h
index 905698197e35..e7787679e317 100644
--- a/arch/um/sys-i386/shared/sysdep/syscalls.h
+++ b/arch/um/sys-i386/shared/sysdep/syscalls.h
@@ -20,7 +20,3 @@ extern syscall_handler_t *sys_call_table[];
 #define EXECUTE_SYSCALL(syscall, regs) \
 	((long (*)(struct syscall_args)) \
 	 (*sys_call_table[syscall]))(SYSCALL_ARGS(&regs->regs))
-
-extern long sys_mmap2(unsigned long addr, unsigned long len,
-		      unsigned long prot, unsigned long flags,
-		      unsigned long fd, unsigned long pgoff);
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 4eefdca9832b..53147ad85b96 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -696,7 +696,7 @@ ia32_sys_call_table:
 	.quad quiet_ni_syscall		/* streams2 */
 	.quad stub32_vfork		/* 190 */
 	.quad compat_sys_getrlimit
-	.quad sys32_mmap2
+	.quad sys_mmap_pgoff
 	.quad sys32_truncate64
 	.quad sys32_ftruncate64
 	.quad sys32_stat64		/* 195 */
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index df82c0e48ded..422572c77923 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -155,9 +155,6 @@ struct mmap_arg_struct {
 asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 {
 	struct mmap_arg_struct a;
-	struct file *file = NULL;
-	unsigned long retval;
-	struct mm_struct *mm ;
 
 	if (copy_from_user(&a, arg, sizeof(a)))
 		return -EFAULT;
@@ -165,22 +162,8 @@ asmlinkage long sys32_mmap(struct mmap_arg_struct __user *arg)
 	if (a.offset & ~PAGE_MASK)
 		return -EINVAL;
 
-	if (!(a.flags & MAP_ANONYMOUS)) {
-		file = fget(a.fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	mm = current->mm;
-	down_write(&mm->mmap_sem);
-	retval = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags,
+	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
 			       a.offset>>PAGE_SHIFT);
-	if (file)
-		fput(file);
-
-	up_write(&mm->mmap_sem);
-
-	return retval;
 }
 
 asmlinkage long sys32_mprotect(unsigned long start, size_t len,
@@ -483,30 +466,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
 	return ret;
 }
 
-asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len,
-			    unsigned long prot, unsigned long flags,
-			    unsigned long fd, unsigned long pgoff)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long error;
-	struct file *file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			return -EBADF;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-	return error;
-}
-
 asmlinkage long sys32_olduname(struct oldold_utsname __user *name)
 {
 	char *arch = "x86_64";
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index b399988eee3a..b4bf9a942ed0 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -118,11 +118,27 @@ extern int __init pcibios_init(void);
 
 /* pci-mmconfig.c */
 
+/* "PCI MMCONFIG %04x [bus %02x-%02x]" */
+#define PCI_MMCFG_RESOURCE_NAME_LEN (22 + 4 + 2 + 2)
+
+struct pci_mmcfg_region {
+	struct list_head list;
+	struct resource res;
+	u64 address;
+	char __iomem *virt;
+	u16 segment;
+	u8 start_bus;
+	u8 end_bus;
+	char name[PCI_MMCFG_RESOURCE_NAME_LEN];
+};
+
 extern int __init pci_mmcfg_arch_init(void);
 extern void __init pci_mmcfg_arch_free(void);
+extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus);
+
+extern struct list_head pci_mmcfg_list;
 
-extern struct acpi_mcfg_allocation *pci_mmcfg_config;
-extern int pci_mmcfg_config_num;
+#define PCI_MMCFG_BUS_OFFSET(bus)	((bus) << 20)
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
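Aside on the new layout macro: PCI_MMCFG_BUS_OFFSET(bus) encodes the ECAM rule that every bus owns 1 MB of the MMCONFIG aperture (32 devices x 8 functions x 4 KB of config space each). A hypothetical helper showing how a pci_mmcfg_region plus bus, devfn and register offset combine into a config-space pointer; mmcfg_virt_addr is a made-up name, and it assumes cfg->virt is the start_bus-adjusted mapping set up by the arch mapping code:

	static void __iomem *mmcfg_virt_addr(struct pci_mmcfg_region *cfg,
					     unsigned int bus,
					     unsigned int devfn, int reg)
	{
		/* 1 MB per bus, 4 KB per function, byte offset within it */
		return cfg->virt + PCI_MMCFG_BUS_OFFSET(bus) +
		       (devfn << 12) + reg;
	}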
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 9af9decb38c3..4a5a089e1c62 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -57,9 +57,6 @@ asmlinkage long sys32_pwrite(unsigned int, char __user *, u32, u32, u32);
 asmlinkage long sys32_personality(unsigned long);
 asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
 
-asmlinkage long sys32_mmap2(unsigned long, unsigned long, unsigned long,
-			    unsigned long, unsigned long, unsigned long);
-
 struct oldold_utsname;
 struct old_utsname;
 asmlinkage long sys32_olduname(struct oldold_utsname __user *);
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 372b76edd63f..1bb6e395881c 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -55,8 +55,6 @@ struct sel_arg_struct;
 struct oldold_utsname;
 struct old_utsname;
 
-asmlinkage long sys_mmap2(unsigned long, unsigned long, unsigned long,
-			  unsigned long, unsigned long, unsigned long);
 asmlinkage int old_mmap(struct mmap_arg_struct __user *);
 asmlinkage int old_select(struct sel_arg_struct __user *);
 asmlinkage int sys_ipc(uint, int, int, int, void __user *, long);
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index d5b7e90c0edf..396ff4cc8ed4 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -37,31 +37,4 @@
 extern struct shared_info *HYPERVISOR_shared_info;
 extern struct start_info *xen_start_info;
 
-enum xen_domain_type {
-	XEN_NATIVE,		/* running on bare hardware    */
-	XEN_PV_DOMAIN,		/* running in a PV domain      */
-	XEN_HVM_DOMAIN,		/* running in a Xen hvm domain */
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type		XEN_NATIVE
-#endif
-
-#define xen_domain()		(xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain()		(xen_domain() &&			\
-				 xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain()	(xen_domain() &&			\
-				 xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#include <xen/interface/xen.h>
-
-#define xen_initial_domain()	(xen_pv_domain() && \
-				 xen_start_info->flags & SIF_INITDOMAIN)
-#else  /* !CONFIG_XEN_DOM0 */
-#define xen_initial_domain()	(0)
-#endif	/* CONFIG_XEN_DOM0 */
-
 #endif /* _ASM_X86_XEN_HYPERVISOR_H */
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 7ffc39965233..9c4a6f747552 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -1336,6 +1336,9 @@ void __init amd_iommu_detect(void)
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
 		x86_init.iommu.iommu_init = amd_iommu_init;
+
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
 }
 
1341 1344
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 20a5b3689463..dd74fe7273b1 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -86,9 +86,15 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs[GDB_DS]	= regs->ds;
 	gdb_regs[GDB_ES]	= regs->es;
 	gdb_regs[GDB_CS]	= regs->cs;
-	gdb_regs[GDB_SS]	= __KERNEL_DS;
 	gdb_regs[GDB_FS]	= 0xFFFF;
 	gdb_regs[GDB_GS]	= 0xFFFF;
+	if (user_mode_vm(regs)) {
+		gdb_regs[GDB_SS] = regs->ss;
+		gdb_regs[GDB_SP] = regs->sp;
+	} else {
+		gdb_regs[GDB_SS] = __KERNEL_DS;
+		gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
+	}
 #else
 	gdb_regs[GDB_R8]	= regs->r8;
 	gdb_regs[GDB_R9]	= regs->r9;
@@ -101,8 +107,8 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 	gdb_regs32[GDB_PS]	= regs->flags;
 	gdb_regs32[GDB_CS]	= regs->cs;
 	gdb_regs32[GDB_SS]	= regs->ss;
-#endif
 	gdb_regs[GDB_SP] = kernel_stack_pointer(regs);
+#endif
 }
 
 /**
@@ -220,8 +226,7 @@ static void kgdb_correct_hw_break(void)
 			dr7 |= ((breakinfo[breakno].len << 2) |
 				 breakinfo[breakno].type) <<
 			       ((breakno << 2) + 16);
-			if (breakno >= 0 && breakno <= 3)
-				set_debugreg(breakinfo[breakno].addr, breakno);
+			set_debugreg(breakinfo[breakno].addr, breakno);
 
 		} else {
 			if ((dr7 & breakbit) && !breakinfo[breakno].enabled) {
@@ -395,7 +400,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
 		/* set the trace bit if we're stepping */
 		if (remcomInBuffer[0] == 's') {
 			linux_regs->flags |= X86_EFLAGS_TF;
-			kgdb_single_step = 1;
 			atomic_set(&kgdb_cpu_doing_single_step,
 				   raw_smp_processor_id());
 		}
diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
index 1884a8d12bfa..dee1ff7cba58 100644
--- a/arch/x86/kernel/sys_i386_32.c
+++ b/arch/x86/kernel/sys_i386_32.c
@@ -24,31 +24,6 @@
 
 #include <asm/syscalls.h>
 
-asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
-			  unsigned long prot, unsigned long flags,
-			  unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file *file = NULL;
-	struct mm_struct *mm = current->mm;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 /*
  * Perform the select(nd, in, out, ex, tv) and mmap() system
  * calls. Linux/i386 didn't use to be able to handle more than
@@ -77,7 +52,7 @@ asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
 	if (a.offset & ~PAGE_MASK)
 		goto out;
 
-	err = sys_mmap2(a.addr, a.len, a.prot, a.flags,
+	err = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags,
 			a.fd, a.offset >> PAGE_SHIFT);
 out:
 	return err;
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index 45e00eb09c3a..8aa2057efd12 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -23,26 +23,11 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
 		unsigned long, fd, unsigned long, off)
 {
 	long error;
-	struct file *file;
-
 	error = -EINVAL;
 	if (off & ~PAGE_MASK)
 		goto out;
 
-	error = -EBADF;
-	file = NULL;
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, off >> PAGE_SHIFT);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
+	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 out:
 	return error;
 }
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 70c2125d55b9..15228b5d3eb7 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -191,7 +191,7 @@ ENTRY(sys_call_table)
 	.long sys_ni_syscall	/* reserved for streams2 */
 	.long ptregs_vfork	/* 190 */
 	.long sys_getrlimit
-	.long sys_mmap2
+	.long sys_mmap_pgoff
 	.long sys_truncate64
 	.long sys_ftruncate64
 	.long sys_stat64	/* 195 */
diff --git a/arch/x86/pci/Makefile b/arch/x86/pci/Makefile
index d49202e740ea..564b008a51c7 100644
--- a/arch/x86/pci/Makefile
+++ b/arch/x86/pci/Makefile
@@ -15,3 +15,8 @@ obj-$(CONFIG_X86_NUMAQ) += numaq_32.o
 
 obj-y				+= common.o early.o
 obj-y				+= amd_bus.o
+obj-$(CONFIG_X86_64)		+= bus_numa.o intel_bus.o
+
+ifeq ($(CONFIG_PCI_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 1014eb4bfc37..959e548a7039 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -7,6 +7,7 @@
 #include <asm/pci_x86.h>
 
 struct pci_root_info {
+	struct acpi_device *bridge;
 	char *name;
 	unsigned int res_num;
 	struct resource *res;
@@ -58,6 +59,30 @@ bus_has_transparent_bridge(struct pci_bus *bus)
 	return false;
 }
 
+static void
+align_resource(struct acpi_device *bridge, struct resource *res)
+{
+	int align = (res->flags & IORESOURCE_MEM) ? 16 : 4;
+
+	/*
+	 * Host bridge windows are not BARs, but the decoders on the PCI side
+	 * that claim this address space have starting alignment and length
+	 * constraints, so fix any obvious BIOS goofs.
+	 */
+	if (!IS_ALIGNED(res->start, align)) {
+		dev_printk(KERN_DEBUG, &bridge->dev,
+			   "host bridge window %pR invalid; "
+			   "aligning start to %d-byte boundary\n", res, align);
+		res->start &= ~(align - 1);
+	}
+	if (!IS_ALIGNED(res->end + 1, align)) {
+		dev_printk(KERN_DEBUG, &bridge->dev,
+			   "host bridge window %pR invalid; "
+			   "aligning end to %d-byte boundary\n", res, align);
+		res->end = ALIGN(res->end, align) - 1;
+	}
+}
+
 static acpi_status
 setup_resource(struct acpi_resource *acpi_res, void *data)
 {
@@ -91,11 +116,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	start = addr.minimum + addr.translation_offset;
 	end = start + addr.address_length - 1;
 	if (info->res_num >= max_root_bus_resources) {
-		printk(KERN_WARNING "PCI: Failed to allocate 0x%lx-0x%lx "
-			"from %s for %s due to _CRS returning more than "
-			"%d resource descriptors\n", (unsigned long) start,
-			(unsigned long) end, root->name, info->name,
-			max_root_bus_resources);
+		if (pci_probe & PCI_USE__CRS)
+			printk(KERN_WARNING "PCI: Failed to allocate "
+			       "0x%lx-0x%lx from %s for %s due to _CRS "
+			       "returning more than %d resource descriptors\n",
+			       (unsigned long) start, (unsigned long) end,
+			       root->name, info->name, max_root_bus_resources);
 		return AE_OK;
 	}
 
@@ -105,14 +131,28 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
 	res->start = start;
 	res->end = end;
 	res->child = NULL;
+	align_resource(info->bridge, res);
+
+	if (!(pci_probe & PCI_USE__CRS)) {
+		dev_printk(KERN_DEBUG, &info->bridge->dev,
+			   "host bridge window %pR (ignored)\n", res);
+		return AE_OK;
+	}
 
 	if (insert_resource(root, res)) {
-		printk(KERN_ERR "PCI: Failed to allocate 0x%lx-0x%lx "
-			"from %s for %s\n", (unsigned long) res->start,
-			(unsigned long) res->end, root->name, info->name);
+		dev_err(&info->bridge->dev,
+			"can't allocate host bridge window %pR\n", res);
 	} else {
 		info->bus->resource[info->res_num] = res;
 		info->res_num++;
+		if (addr.translation_offset)
+			dev_info(&info->bridge->dev, "host bridge window %pR "
+				 "(PCI address [%#llx-%#llx])\n",
+				 res, res->start - addr.translation_offset,
+				 res->end - addr.translation_offset);
+		else
+			dev_info(&info->bridge->dev,
+				 "host bridge window %pR\n", res);
 	}
 	return AE_OK;
 }
@@ -124,6 +164,12 @@ get_current_resources(struct acpi_device *device, int busnum,
 	struct pci_root_info info;
 	size_t size;
 
+	if (!(pci_probe & PCI_USE__CRS))
+		dev_info(&device->dev,
+			 "ignoring host bridge windows from ACPI; "
+			 "boot with \"pci=use_crs\" to use them\n");
+
+	info.bridge = device;
 	info.bus = bus;
 	info.res_num = 0;
 	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
@@ -163,8 +209,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
 #endif
 
 	if (domain && !pci_domains_supported) {
-		printk(KERN_WARNING "PCI: Multiple domains not supported "
-		       "(dom %d, bus %d)\n", domain, busnum);
+		printk(KERN_WARNING "pci_bus %04x:%02x: "
+		       "ignored (multiple domains not supported)\n",
+		       domain, busnum);
 		return NULL;
 	}
 
@@ -188,7 +235,8 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
 	 */
 	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
 	if (!sd) {
-		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
+		printk(KERN_WARNING "pci_bus %04x:%02x: "
+		       "ignored (out of memory)\n", domain, busnum);
 		return NULL;
 	}
 
@@ -209,9 +257,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
 	} else {
 		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
 		if (bus) {
-			if (pci_probe & PCI_USE__CRS)
-				get_current_resources(device, busnum, domain,
-						      bus);
+			get_current_resources(device, busnum, domain, bus);
 			bus->subordinate = pci_scan_child_bus(bus);
 		}
 	}
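The rounding in align_resource() above is plain mask arithmetic; a worked example with hypothetical BIOS-reported bounds (align = 16 for a memory window):

	/* Illustration only, made-up values: */
	struct resource r = { .start = 0xbff00001, .end = 0xcfeffffe };
	int align = 16;			  /* IORESOURCE_MEM case */
	r.start &= ~(align - 1);	  /* 0xbff00001 -> 0xbff00000 */
	r.end = ALIGN(r.end, align) - 1;  /* 0xcfeffffe -> 0xcfefffff */

Both bounds are widened outward to the enclosing 16-byte boundaries, so the window never shrinks.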
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
index 572ee9782f2a..95ecbd495955 100644
--- a/arch/x86/pci/amd_bus.c
+++ b/arch/x86/pci/amd_bus.c
@@ -6,10 +6,10 @@
 
 #ifdef CONFIG_X86_64
 #include <asm/pci-direct.h>
-#include <asm/mpspec.h>
-#include <linux/cpumask.h>
 #endif
 
+#include "bus_numa.h"
+
 /*
  * This discovers the pcibus <-> node mapping on AMD K8.
  * also get peer root bus resource for io,mmio
@@ -17,67 +17,6 @@
 
 #ifdef CONFIG_X86_64
 
-/*
- * sub bus (transparent) will use entres from 3 to store extra from root,
- * so need to make sure have enought slot there, increase PCI_BUS_NUM_RESOURCES?
- */
-#define RES_NUM 16
-struct pci_root_info {
-	char name[12];
-	unsigned int res_num;
-	struct resource res[RES_NUM];
-	int bus_min;
-	int bus_max;
-	int node;
-	int link;
-};
-
-/* 4 at this time, it may become to 32 */
-#define PCI_ROOT_NR 4
-static int pci_root_num;
-static struct pci_root_info pci_root_info[PCI_ROOT_NR];
-
-void x86_pci_root_bus_res_quirks(struct pci_bus *b)
-{
-	int i;
-	int j;
-	struct pci_root_info *info;
-
-	/* don't go for it if _CRS is used already */
-	if (b->resource[0] != &ioport_resource ||
-	    b->resource[1] != &iomem_resource)
-		return;
-
-	/* if only one root bus, don't need to anything */
-	if (pci_root_num < 2)
-		return;
-
-	for (i = 0; i < pci_root_num; i++) {
-		if (pci_root_info[i].bus_min == b->number)
-			break;
-	}
-
-	if (i == pci_root_num)
-		return;
-
-	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
-			b->number);
-
-	info = &pci_root_info[i];
-	for (j = 0; j < info->res_num; j++) {
-		struct resource *res;
-		struct resource *root;
-
-		res = &info->res[j];
-		b->resource[j] = res;
-		if (res->flags & IORESOURCE_IO)
-			root = &ioport_resource;
-		else
-			root = &iomem_resource;
-		insert_resource(root, res);
-	}
-}
-
 #define RANGE_NUM 16
 
 struct res_range {
@@ -130,52 +69,6 @@ static void __init update_range(struct res_range *range, size_t start,
 	}
 }
 
-static void __init update_res(struct pci_root_info *info, size_t start,
-			      size_t end, unsigned long flags, int merge)
-{
-	int i;
-	struct resource *res;
-
-	if (!merge)
-		goto addit;
-
-	/* try to merge it with old one */
-	for (i = 0; i < info->res_num; i++) {
-		size_t final_start, final_end;
-		size_t common_start, common_end;
-
-		res = &info->res[i];
-		if (res->flags != flags)
-			continue;
-
-		common_start = max((size_t)res->start, start);
-		common_end = min((size_t)res->end, end);
-		if (common_start > common_end + 1)
-			continue;
-
-		final_start = min((size_t)res->start, start);
-		final_end = max((size_t)res->end, end);
-
-		res->start = final_start;
-		res->end = final_end;
-		return;
-	}
-
-addit:
-
-	/* need to add that */
-	if (info->res_num >= RES_NUM)
-		return;
-
-	res = &info->res[info->res_num];
-	res->name = info->name;
-	res->flags = flags;
-	res->start = start;
-	res->end = end;
-	res->child = NULL;
-	info->res_num++;
-}
-
 struct pci_hostbridge_probe {
 	u32 bus;
 	u32 slot;
@@ -230,7 +123,6 @@ static int __init early_fill_mp_bus_info(void)
 	int j;
 	unsigned bus;
 	unsigned slot;
-	int found;
 	int node;
 	int link;
 	int def_node;
@@ -247,7 +139,7 @@ static int __init early_fill_mp_bus_info(void)
 	if (!early_pci_allowed())
 		return -1;
 
-	found = 0;
+	found_all_numa_early = 0;
 	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
 		u32 id;
 		u16 device;
@@ -261,12 +153,12 @@ static int __init early_fill_mp_bus_info(void)
 		device = (id>>16) & 0xffff;
 		if (pci_probes[i].vendor == vendor &&
 		    pci_probes[i].device == device) {
-			found = 1;
+			found_all_numa_early = 1;
 			break;
 		}
 	}
 
-	if (!found)
+	if (!found_all_numa_early)
 		return 0;
 
 	pci_root_num = 0;
@@ -488,7 +380,7 @@ static int __init early_fill_mp_bus_info(void)
 		info = &pci_root_info[i];
 		res_num = info->res_num;
 		busnum = info->bus_min;
-		printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n",
+		printk(KERN_DEBUG "bus: [%02x, %02x] on node %x link %x\n",
 		       info->bus_min, info->bus_max, info->node, info->link);
 		for (j = 0; j < res_num; j++) {
 			res = &info->res[j];
diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c
new file mode 100644
index 000000000000..145df00e0387
--- /dev/null
+++ b/arch/x86/pci/bus_numa.c
@@ -0,0 +1,101 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+
+#include "bus_numa.h"
+
+int pci_root_num;
+struct pci_root_info pci_root_info[PCI_ROOT_NR];
+int found_all_numa_early;
+
+void x86_pci_root_bus_res_quirks(struct pci_bus *b)
+{
+	int i;
+	int j;
+	struct pci_root_info *info;
+
+	/* don't go for it if _CRS is used already */
+	if (b->resource[0] != &ioport_resource ||
+	    b->resource[1] != &iomem_resource)
+		return;
+
+	if (!pci_root_num)
+		return;
+
+	/* for amd, if only one root bus, don't need to do anything */
+	if (pci_root_num < 2 && found_all_numa_early)
+		return;
+
+	for (i = 0; i < pci_root_num; i++) {
+		if (pci_root_info[i].bus_min == b->number)
+			break;
+	}
+
+	if (i == pci_root_num)
+		return;
+
+	printk(KERN_DEBUG "PCI: peer root bus %02x res updated from pci conf\n",
+	       b->number);
+
+	info = &pci_root_info[i];
+	for (j = 0; j < info->res_num; j++) {
+		struct resource *res;
+		struct resource *root;
+
+		res = &info->res[j];
+		b->resource[j] = res;
+		if (res->flags & IORESOURCE_IO)
+			root = &ioport_resource;
+		else
+			root = &iomem_resource;
+		insert_resource(root, res);
+	}
+}
+
+void __init update_res(struct pci_root_info *info, size_t start,
+		       size_t end, unsigned long flags, int merge)
+{
+	int i;
+	struct resource *res;
+
+	if (start > end)
+		return;
+
+	if (!merge)
+		goto addit;
+
+	/* try to merge it with old one */
+	for (i = 0; i < info->res_num; i++) {
+		size_t final_start, final_end;
+		size_t common_start, common_end;
+
+		res = &info->res[i];
+		if (res->flags != flags)
+			continue;
+
+		common_start = max((size_t)res->start, start);
+		common_end = min((size_t)res->end, end);
+		if (common_start > common_end + 1)
+			continue;
+
+		final_start = min((size_t)res->start, start);
+		final_end = max((size_t)res->end, end);
+
+		res->start = final_start;
+		res->end = final_end;
+		return;
+	}
+
+addit:
+
+	/* need to add that */
+	if (info->res_num >= RES_NUM)
+		return;
+
+	res = &info->res[info->res_num];
+	res->name = info->name;
+	res->flags = flags;
+	res->start = start;
+	res->end = end;
+	res->child = NULL;
+	info->res_num++;
+}
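On the merge test in update_res(): common_start > common_end + 1 rejects only ranges that leave a gap, so exactly adjacent ranges merge as well. A worked example with made-up ranges carrying the same flags:

	/* existing res = [0x1000, 0x1fff], incoming = [0x2000, 0x2fff]
	 * common_start = max(0x1000, 0x2000) = 0x2000
	 * common_end   = min(0x1fff, 0x2fff) = 0x1fff
	 * 0x2000 > 0x1fff + 1 is false, so the two merge into
	 * [min(starts), max(ends)] = [0x1000, 0x2fff]. */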
diff --git a/arch/x86/pci/bus_numa.h b/arch/x86/pci/bus_numa.h
new file mode 100644
index 000000000000..adbc23fe82ac
--- /dev/null
+++ b/arch/x86/pci/bus_numa.h
@@ -0,0 +1,27 @@
+#ifdef CONFIG_X86_64
+
+/*
+ * sub bus (transparent) will use entres from 3 to store extra from
+ * root, so need to make sure we have enough slot there, Should we
+ * increase PCI_BUS_NUM_RESOURCES?
+ */
+#define RES_NUM 16
+struct pci_root_info {
+	char name[12];
+	unsigned int res_num;
+	struct resource res[RES_NUM];
+	int bus_min;
+	int bus_max;
+	int node;
+	int link;
+};
+
+/* 4 at this time, it may become to 32 */
+#define PCI_ROOT_NR 4
+extern int pci_root_num;
+extern struct pci_root_info pci_root_info[PCI_ROOT_NR];
+extern int found_all_numa_early;
+
+extern void update_res(struct pci_root_info *info, size_t start,
+		       size_t end, unsigned long flags, int merge);
+#endif
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 1331fcf26143..d2552c68e94d 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -410,8 +410,6 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
 	return bus;
 }
 
-extern u8 pci_cache_line_size;
-
 int __init pcibios_init(void)
 {
 	struct cpuinfo_x86 *c = &boot_cpu_data;
@@ -422,15 +420,19 @@ int __init pcibios_init(void)
 	}
 
 	/*
-	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
-	 * and P4. It's also good for 386/486s (which actually have 16)
+	 * Set PCI cacheline size to that of the CPU if the CPU has reported it.
+	 * (For older CPUs that don't support cpuid, we se it to 32 bytes
+	 * It's also good for 386/486s (which actually have 16)
 	 * as quite a few PCI devices do not support smaller values.
 	 */
-	pci_cache_line_size = 32 >> 2;
-	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
-		pci_cache_line_size = 64 >> 2;	/* K7 & K8 */
-	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
-		pci_cache_line_size = 128 >> 2;	/* P4 */
+	if (c->x86_clflush_size > 0) {
+		pci_dfl_cache_line_size = c->x86_clflush_size >> 2;
+		printk(KERN_DEBUG "PCI: pci_cache_line_size set to %d bytes\n",
+			pci_dfl_cache_line_size << 2);
+	} else {
+		pci_dfl_cache_line_size = 32 >> 2;
+		printk(KERN_DEBUG "PCI: Unknown cacheline size. Setting to 32 bytes\n");
+	}
 
 	pcibios_resource_survey();
 
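The cache-line value is kept in 32-bit dwords, hence the paired >> 2 and << 2; for instance, on a CPU reporting a 64-byte flush size (hypothetical values):

	u8 clflush_size = 64;			/* from CPUID */
	pci_dfl_cache_line_size = clflush_size >> 2;	/* 16 dwords */
	/* the KERN_DEBUG message prints it back as 16 << 2 = 64 bytes */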
diff --git a/arch/x86/pci/early.c b/arch/x86/pci/early.c
index aaf26ae58cd5..d1067d539bee 100644
--- a/arch/x86/pci/early.c
+++ b/arch/x86/pci/early.c
@@ -12,8 +12,6 @@ u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset)
 	u32 v;
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	v = inl(0xcfc);
-	if (v != 0xffffffff)
-		pr_debug("%x reading 4 from %x: %x\n", slot, offset, v);
 	return v;
 }
 
@@ -22,7 +20,6 @@ u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset)
 	u8 v;
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	v = inb(0xcfc + (offset&3));
-	pr_debug("%x reading 1 from %x: %x\n", slot, offset, v);
 	return v;
 }
 
@@ -31,28 +28,24 @@ u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset)
 	u16 v;
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	v = inw(0xcfc + (offset&2));
-	pr_debug("%x reading 2 from %x: %x\n", slot, offset, v);
 	return v;
 }
 
 void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset,
 				    u32 val)
 {
-	pr_debug("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	outl(val, 0xcfc);
 }
 
 void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val)
 {
-	pr_debug("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	outb(val, 0xcfc + (offset&3));
 }
 
 void write_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset, u16 val)
 {
-	pr_debug("%x writing to %x: %x\n", slot, offset, val);
 	outl(0x80000000 | (bus<<16) | (slot<<11) | (func<<8) | offset, 0xcf8);
 	outw(val, 0xcfc + (offset&2));
 }
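All of these helpers use PCI configuration mechanism #1: a dword written to port 0xcf8 selects the target register, and the data window then appears at 0xcfc. The encoding, with one worked example:

	/* 0x80000000 | bus << 16 | slot << 11 | func << 8 | offset
	 * e.g. bus 0, slot 0x1f, func 3, offset 0x40:
	 * 0x80000000 | 0x0000f800 | 0x00000300 | 0x40 = 0x8000fb40 */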
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index b22d13b0c71d..5dc9e8c63fcd 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -129,7 +129,9 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 				continue;
 			if (!r->start ||
 			    pci_claim_resource(dev, idx) < 0) {
-				dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+				dev_info(&dev->dev,
+					 "can't reserve window %pR\n",
+					 r);
 				/*
 				 * Something is wrong with the region.
 				 * Invalidate the resource to prevent
@@ -144,16 +146,29 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
 	}
 }
 
+struct pci_check_idx_range {
+	int start;
+	int end;
+};
+
 static void __init pcibios_allocate_resources(int pass)
 {
 	struct pci_dev *dev = NULL;
-	int idx, disabled;
+	int idx, disabled, i;
 	u16 command;
 	struct resource *r;
 
+	struct pci_check_idx_range idx_range[] = {
+		{ PCI_STD_RESOURCES, PCI_STD_RESOURCE_END },
+#ifdef CONFIG_PCI_IOV
+		{ PCI_IOV_RESOURCES, PCI_IOV_RESOURCE_END },
+#endif
+	};
+
 	for_each_pci_dev(dev) {
 		pci_read_config_word(dev, PCI_COMMAND, &command);
-		for (idx = 0; idx < PCI_ROM_RESOURCE; idx++) {
+		for (i = 0; i < ARRAY_SIZE(idx_range); i++)
+		    for (idx = idx_range[i].start; idx <= idx_range[i].end; idx++) {
 			r = &dev->resource[idx];
 			if (r->parent)	/* Already allocated */
 				continue;
@@ -164,12 +179,12 @@ static void __init pcibios_allocate_resources(int pass)
 			else
 				disabled = !(command & PCI_COMMAND_MEMORY);
 			if (pass == disabled) {
-				dev_dbg(&dev->dev, "resource %#08llx-%#08llx (f=%lx, d=%d, p=%d)\n",
-					(unsigned long long) r->start,
-					(unsigned long long) r->end,
-					r->flags, disabled, pass);
+				dev_dbg(&dev->dev,
+					"BAR %d: reserving %pr (d=%d, p=%d)\n",
+					idx, r, disabled, pass);
 				if (pci_claim_resource(dev, idx) < 0) {
-					dev_info(&dev->dev, "BAR %d: can't allocate resource\n", idx);
+					dev_info(&dev->dev,
+						 "can't reserve %pR\n", r);
 					/* We'll assign a new address later */
 					r->end -= r->start;
 					r->start = 0;
@@ -182,7 +197,7 @@ static void __init pcibios_allocate_resources(int pass)
 			/* Turn the ROM off, leave the resource region,
 			 * but keep it unregistered. */
 			u32 reg;
-			dev_dbg(&dev->dev, "disabling ROM\n");
+			dev_dbg(&dev->dev, "disabling ROM %pR\n", r);
 			r->flags &= ~IORESOURCE_ROM_ENABLE;
 			pci_read_config_dword(dev,
 					      dev->rom_base_reg, &reg);
@@ -282,6 +297,15 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	prot = pgprot_val(vma->vm_page_prot);
+
+	/*
+	 * Return error if pat is not enabled and write_combine is requested.
+	 * Caller can followup with UC MINUS request and add a WC mtrr if there
+	 * is a free mtrr slot.
+	 */
+	if (!pat_enabled && write_combine)
+		return -EINVAL;
+
 	if (pat_enabled && write_combine)
 		prot |= _PAGE_CACHE_WC;
 	else if (pat_enabled || boot_cpu_data.x86 > 3)
diff --git a/arch/x86/pci/intel_bus.c b/arch/x86/pci/intel_bus.c
new file mode 100644
index 000000000000..b7a55dc55d13
--- /dev/null
+++ b/arch/x86/pci/intel_bus.c
@@ -0,0 +1,90 @@
+/*
+ * to read io range from IOH pci conf, need to do it after mmconfig is there
+ */
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/pci_x86.h>
+
+#include "bus_numa.h"
+
+static inline void print_ioh_resources(struct pci_root_info *info)
+{
+	int res_num;
+	int busnum;
+	int i;
+
+	printk(KERN_DEBUG "IOH bus: [%02x, %02x]\n",
+	       info->bus_min, info->bus_max);
+	res_num = info->res_num;
+	busnum = info->bus_min;
+	for (i = 0; i < res_num; i++) {
+		struct resource *res;
+
+		res = &info->res[i];
+		printk(KERN_DEBUG "IOH bus: %02x index %x %s: [%llx, %llx]\n",
+		       busnum, i,
+		       (res->flags & IORESOURCE_IO) ? "io port" :
+						      "mmio",
+		       res->start, res->end);
+	}
+}
+
+#define IOH_LIO			0x108
+#define IOH_LMMIOL		0x10c
+#define IOH_LMMIOH		0x110
+#define IOH_LMMIOH_BASEU	0x114
+#define IOH_LMMIOH_LIMITU	0x118
+#define IOH_LCFGBUS		0x11c
+
+static void __devinit pci_root_bus_res(struct pci_dev *dev)
+{
+	u16 word;
+	u32 dword;
+	struct pci_root_info *info;
+	u16 io_base, io_end;
+	u32 mmiol_base, mmiol_end;
+	u64 mmioh_base, mmioh_end;
+	int bus_base, bus_end;
+
+	if (pci_root_num >= PCI_ROOT_NR) {
+		printk(KERN_DEBUG "intel_bus.c: PCI_ROOT_NR is too small\n");
+		return;
+	}
+
+	info = &pci_root_info[pci_root_num];
+	pci_root_num++;
+
+	pci_read_config_word(dev, IOH_LCFGBUS, &word);
+	bus_base = (word & 0xff);
+	bus_end = (word & 0xff00) >> 8;
+	sprintf(info->name, "PCI Bus #%02x", bus_base);
+	info->bus_min = bus_base;
+	info->bus_max = bus_end;
+
+	pci_read_config_word(dev, IOH_LIO, &word);
+	io_base = (word & 0xf0) << (12 - 4);
+	io_end = (word & 0xf000) | 0xfff;
+	update_res(info, io_base, io_end, IORESOURCE_IO, 0);
+
+	pci_read_config_dword(dev, IOH_LMMIOL, &dword);
+	mmiol_base = (dword & 0xff00) << (24 - 8);
+	mmiol_end = (dword & 0xff000000) | 0xffffff;
+	update_res(info, mmiol_base, mmiol_end, IORESOURCE_MEM, 0);
+
+	pci_read_config_dword(dev, IOH_LMMIOH, &dword);
+	mmioh_base = ((u64)(dword & 0xfc00)) << (26 - 10);
+	mmioh_end = ((u64)(dword & 0xfc000000) | 0x3ffffff);
+	pci_read_config_dword(dev, IOH_LMMIOH_BASEU, &dword);
+	mmioh_base |= ((u64)(dword & 0x7ffff)) << 32;
+	pci_read_config_dword(dev, IOH_LMMIOH_LIMITU, &dword);
+	mmioh_end |= ((u64)(dword & 0x7ffff)) << 32;
+	update_res(info, mmioh_base, mmioh_end, IORESOURCE_MEM, 0);
+
+	print_ioh_resources(info);
+}
+
+/* intel IOH */
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, pci_root_bus_res);
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 602c172d3bd5..b19d1e54201e 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -15,48 +15,98 @@
 #include <linux/acpi.h>
 #include <linux/sfi_acpi.h>
 #include <linux/bitmap.h>
-#include <linux/sort.h>
+#include <linux/dmi.h>
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 #include <asm/acpi.h>
 
 #define PREFIX "PCI: "
 
-/* aperture is up to 256MB but BIOS may reserve less */
-#define MMCONFIG_APER_MIN	(2 * 1024*1024)
-#define MMCONFIG_APER_MAX	(256 * 1024*1024)
-
 /* Indicate if the mmcfg resources have been placed into the resource table. */
 static int __initdata pci_mmcfg_resources_inserted;
 
-static __init int extend_mmcfg(int num)
+LIST_HEAD(pci_mmcfg_list);
+
+static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
 {
-	struct acpi_mcfg_allocation *new;
-	int new_num = pci_mmcfg_config_num + num;
+	if (cfg->res.parent)
+		release_resource(&cfg->res);
+	list_del(&cfg->list);
+	kfree(cfg);
+}
 
-	new = kzalloc(sizeof(pci_mmcfg_config[0]) * new_num, GFP_KERNEL);
-	if (!new)
-		return -1;
+static __init void free_all_mmcfg(void)
+{
+	struct pci_mmcfg_region *cfg, *tmp;
 
-	if (pci_mmcfg_config) {
-		memcpy(new, pci_mmcfg_config,
-		       sizeof(pci_mmcfg_config[0]) * new_num);
-		kfree(pci_mmcfg_config);
+	pci_mmcfg_arch_free();
+	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list)
+		pci_mmconfig_remove(cfg);
+}
+
+static __init void list_add_sorted(struct pci_mmcfg_region *new)
+{
+	struct pci_mmcfg_region *cfg;
+
+	/* keep list sorted by segment and starting bus number */
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		if (cfg->segment > new->segment ||
+		    (cfg->segment == new->segment &&
+		     cfg->start_bus >= new->start_bus)) {
+			list_add_tail(&new->list, &cfg->list);
+			return;
+		}
 	}
-	pci_mmcfg_config = new;
+	list_add_tail(&new->list, &pci_mmcfg_list);
+}
 
-	return 0;
+static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+							int end, u64 addr)
+{
+	struct pci_mmcfg_region *new;
+	int num_buses;
+	struct resource *res;
+
+	if (addr == 0)
+		return NULL;
+
+	new = kzalloc(sizeof(*new), GFP_KERNEL);
+	if (!new)
+		return NULL;
+
+	new->address = addr;
+	new->segment = segment;
+	new->start_bus = start;
+	new->end_bus = end;
+
+	list_add_sorted(new);
+
+	num_buses = end - start + 1;
+	res = &new->res;
+	res->start = addr + PCI_MMCFG_BUS_OFFSET(start);
+	res->end = addr + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
+	res->name = new->name;
+
+	printk(KERN_INFO PREFIX "MMCONFIG for domain %04x [bus %02x-%02x] at "
+	       "%pR (base %#lx)\n", segment, start, end, &new->res,
+	       (unsigned long) addr);
+
+	return new;
 }
 
-static __init void fill_one_mmcfg(u64 addr, int segment, int start, int end)
+struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
 {
-	int i = pci_mmcfg_config_num;
+	struct pci_mmcfg_region *cfg;
 
-	pci_mmcfg_config_num++;
-	pci_mmcfg_config[i].address = addr;
-	pci_mmcfg_config[i].pci_segment = segment;
-	pci_mmcfg_config[i].start_bus_number = start;
-	pci_mmcfg_config[i].end_bus_number = end;
+	list_for_each_entry(cfg, &pci_mmcfg_list, list)
+		if (cfg->segment == segment &&
+		    cfg->start_bus <= bus && bus <= cfg->end_bus)
+			return cfg;
+
+	return NULL;
 }
 
 static const char __init *pci_mmcfg_e7520(void)
@@ -68,11 +118,9 @@ static const char __init *pci_mmcfg_e7520(void)
 	if (win == 0x0000 || win == 0xf000)
 		return NULL;
 
-	if (extend_mmcfg(1) == -1)
+	if (pci_mmconfig_add(0, 0, 255, win << 16) == NULL)
 		return NULL;
 
-	fill_one_mmcfg(win << 16, 0, 0, 255);
-
 	return "Intel Corporation E7520 Memory Controller Hub";
 }
 
@@ -114,11 +162,9 @@ static const char __init *pci_mmcfg_intel_945(void)
 	if ((pciexbar & mask) >= 0xf0000000U)
 		return NULL;
 
-	if (extend_mmcfg(1) == -1)
+	if (pci_mmconfig_add(0, 0, (len >> 20) - 1, pciexbar & mask) == NULL)
 		return NULL;
 
-	fill_one_mmcfg(pciexbar & mask, 0, 0, (len >> 20) - 1);
-
 	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
 }
 
@@ -127,7 +173,7 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
 	u32 low, high, address;
 	u64 base, msr;
 	int i;
-	unsigned segnbits = 0, busnbits;
+	unsigned segnbits = 0, busnbits, end_bus;
 
 	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
 		return NULL;
@@ -161,11 +207,13 @@ static const char __init *pci_mmcfg_amd_fam10h(void)
 		busnbits = 8;
 	}
 
-	if (extend_mmcfg(1 << segnbits) == -1)
-		return NULL;
-
+	end_bus = (1 << busnbits) - 1;
 	for (i = 0; i < (1 << segnbits); i++)
-		fill_one_mmcfg(base + (1<<28) * i, i, 0, (1 << busnbits) - 1);
+		if (pci_mmconfig_add(i, 0, end_bus,
+				     base + (1<<28) * i) == NULL) {
+			free_all_mmcfg();
+			return NULL;
+		}
 
 	return "AMD Family 10h NB";
 }
@@ -190,7 +238,7 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
 	/*
 	 * do check if amd fam10h already took over
 	 */
-	if (!acpi_disabled || pci_mmcfg_config_num || mcp55_checked)
+	if (!acpi_disabled || !list_empty(&pci_mmcfg_list) || mcp55_checked)
 		return NULL;
 
 	mcp55_checked = true;
@@ -213,16 +261,14 @@ static const char __init *pci_mmcfg_nvidia_mcp55(void)
 		if (!(extcfg & extcfg_enable_mask))
 			continue;
 
-		if (extend_mmcfg(1) == -1)
-			continue;
-
 		size_index = (extcfg & extcfg_size_mask) >> extcfg_size_shift;
 		base = extcfg & extcfg_base_mask[size_index];
 		/* base could > 4G */
 		base <<= extcfg_base_lshift;
 		start = (extcfg & extcfg_start_mask) >> extcfg_start_shift;
 		end = start + extcfg_sizebus[size_index] - 1;
-		fill_one_mmcfg(base, 0, start, end);
+		if (pci_mmconfig_add(0, start, end, base) == NULL)
+			continue;
 		mcp55_mmconf_found++;
 	}
 
@@ -253,45 +299,27 @@ static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
 	  0x0369, pci_mmcfg_nvidia_mcp55 },
 };
 
-static int __init cmp_mmcfg(const void *x1, const void *x2)
-{
-	const typeof(pci_mmcfg_config[0]) *m1 = x1;
-	const typeof(pci_mmcfg_config[0]) *m2 = x2;
-	int start1, start2;
-
-	start1 = m1->start_bus_number;
-	start2 = m2->start_bus_number;
-
-	return start1 - start2;
-}
-
 static void __init pci_mmcfg_check_end_bus_number(void)
 {
-	int i;
-	typeof(pci_mmcfg_config[0]) *cfg, *cfgx;
-
-	/* sort them at first */
-	sort(pci_mmcfg_config, pci_mmcfg_config_num,
-	     sizeof(pci_mmcfg_config[0]), cmp_mmcfg, NULL);
+	struct pci_mmcfg_region *cfg, *cfgx;
 
 	/* last one*/
-	if (pci_mmcfg_config_num > 0) {
-		i = pci_mmcfg_config_num - 1;
-		cfg = &pci_mmcfg_config[i];
-		if (cfg->end_bus_number < cfg->start_bus_number)
-			cfg->end_bus_number = 255;
-	}
+	cfg = list_entry(pci_mmcfg_list.prev, typeof(*cfg), list);
+	if (cfg)
+		if (cfg->end_bus < cfg->start_bus)
+			cfg->end_bus = 255;
 
-	/* don't overlap please */
-	for (i = 0; i < pci_mmcfg_config_num - 1; i++) {
-		cfg = &pci_mmcfg_config[i];
-		cfgx = &pci_mmcfg_config[i+1];
+	if (list_is_singular(&pci_mmcfg_list))
+		return;
 
-		if (cfg->end_bus_number < cfg->start_bus_number)
-			cfg->end_bus_number = 255;
+	/* don't overlap please */
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		if (cfg->end_bus < cfg->start_bus)
+			cfg->end_bus = 255;
 
-		if (cfg->end_bus_number >= cfgx->start_bus_number)
-			cfg->end_bus_number = cfgx->start_bus_number - 1;
+		cfgx = list_entry(cfg->list.next, typeof(*cfg), list);
+		if (cfg != cfgx && cfg->end_bus >= cfgx->start_bus)
+			cfg->end_bus = cfgx->start_bus - 1;
 	}
 }
 
@@ -306,8 +334,7 @@ static int __init pci_mmcfg_check_hostbridge(void)
 	if (!raw_pci_ops)
 		return 0;
 
-	pci_mmcfg_config_num = 0;
-	pci_mmcfg_config = NULL;
+	free_all_mmcfg();
 
 	for (i = 0; i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
 		bus = pci_mmcfg_probes[i].bus;
@@ -322,45 +349,22 @@ static int __init pci_mmcfg_check_hostbridge(void)
 		name = pci_mmcfg_probes[i].probe();
 
 		if (name)
-			printk(KERN_INFO "PCI: Found %s with MMCONFIG support.\n",
+			printk(KERN_INFO PREFIX "%s with MMCONFIG support\n",
 			       name);
 	}
 
 	/* some end_bus_number is crazy, fix it */
 	pci_mmcfg_check_end_bus_number();
 
-	return pci_mmcfg_config_num != 0;
+	return !list_empty(&pci_mmcfg_list);
 }
 
 static void __init pci_mmcfg_insert_resources(void)
 {
-#define PCI_MMCFG_RESOURCE_NAME_LEN 24
-	int i;
-	struct resource *res;
-	char *names;
-	unsigned num_buses;
-
-	res = kcalloc(PCI_MMCFG_RESOURCE_NAME_LEN + sizeof(*res),
-		      pci_mmcfg_config_num, GFP_KERNEL);
-	if (!res) {
-		printk(KERN_ERR "PCI: Unable to allocate MMCONFIG resources\n");
-		return;
-	}
+	struct pci_mmcfg_region *cfg;
 
-	names = (void *)&res[pci_mmcfg_config_num];
-	for (i = 0; i < pci_mmcfg_config_num; i++, res++) {
-		struct acpi_mcfg_allocation *cfg = &pci_mmcfg_config[i];
-		num_buses = cfg->end_bus_number - cfg->start_bus_number + 1;
-		res->name = names;
-		snprintf(names, PCI_MMCFG_RESOURCE_NAME_LEN,
-			 "PCI MMCONFIG %u [%02x-%02x]", cfg->pci_segment,
-			 cfg->start_bus_number, cfg->end_bus_number);
-		res->start = cfg->address + (cfg->start_bus_number << 20);
-		res->end = res->start + (num_buses << 20) - 1;
-		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-		insert_resource(&iomem_resource, res);
-		names += PCI_MMCFG_RESOURCE_NAME_LEN;
-	}
+	list_for_each_entry(cfg, &pci_mmcfg_list, list)
+		insert_resource(&iomem_resource, &cfg->res);
 
 	/* Mark that the resources have been inserted. */
 	pci_mmcfg_resources_inserted = 1;
@@ -437,11 +441,12 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
 typedef int (*check_reserved_t)(u64 start, u64 end, unsigned type);
 
 static int __init is_mmconf_reserved(check_reserved_t is_reserved,
-				     u64 addr, u64 size, int i,
-				     typeof(pci_mmcfg_config[0]) *cfg, int with_e820)
+				     struct pci_mmcfg_region *cfg, int with_e820)
 {
+	u64 addr = cfg->res.start;
+	u64 size = resource_size(&cfg->res);
 	u64 old_size = size;
-	int valid = 0;
+	int valid = 0, num_buses;
 
 	while (!is_reserved(addr, addr + size, E820_RESERVED)) {
 		size >>= 1;
@@ -450,19 +455,25 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
 	}
 
 	if (size >= (16UL<<20) || size == old_size) {
-		printk(KERN_NOTICE
-		       "PCI: MCFG area at %Lx reserved in %s\n",
-		       addr, with_e820?"E820":"ACPI motherboard resources");
+		printk(KERN_INFO PREFIX "MMCONFIG at %pR reserved in %s\n",
+		       &cfg->res,
+		       with_e820 ? "E820" : "ACPI motherboard resources");
 		valid = 1;
 
 		if (old_size != size) {
-			/* update end_bus_number */
-			cfg->end_bus_number = cfg->start_bus_number + ((size>>20) - 1);
-			printk(KERN_NOTICE "PCI: updated MCFG configuration %d: base %lx "
-			       "segment %hu buses %u - %u\n",
-			       i, (unsigned long)cfg->address, cfg->pci_segment,
-			       (unsigned int)cfg->start_bus_number,
-			       (unsigned int)cfg->end_bus_number);
+			/* update end_bus */
+			cfg->end_bus = cfg->start_bus + ((size>>20) - 1);
+			num_buses = cfg->end_bus - cfg->start_bus + 1;
+			cfg->res.end = cfg->res.start +
+				       PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
+			snprintf(cfg->name, PCI_MMCFG_RESOURCE_NAME_LEN,
+				 "PCI MMCONFIG %04x [bus %02x-%02x]",
+				 cfg->segment, cfg->start_bus, cfg->end_bus);
+			printk(KERN_INFO PREFIX
+			       "MMCONFIG for %04x [bus%02x-%02x] "
+			       "at %pR (base %#lx) (size reduced!)\n",
+			       cfg->segment, cfg->start_bus, cfg->end_bus,
+			       &cfg->res, (unsigned long) cfg->address);
 		}
 	}
 
@@ -471,45 +482,26 @@ static int __init is_mmconf_reserved(check_reserved_t is_reserved,
 
 static void __init pci_mmcfg_reject_broken(int early)
 {
-	typeof(pci_mmcfg_config[0]) *cfg;
-	int i;
+	struct pci_mmcfg_region *cfg;
 
-	if ((pci_mmcfg_config_num == 0) ||
-	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].address == 0))
-		return;
-
-	for (i = 0; i < pci_mmcfg_config_num; i++) {
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
 		int valid = 0;
-		u64 addr, size;
-
-		cfg = &pci_mmcfg_config[i];
-		addr = cfg->start_bus_number;
-		addr <<= 20;
-		addr += cfg->address;
-		size = cfg->end_bus_number + 1 - cfg->start_bus_number;
-		size <<= 20;
-		printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
-		       "segment %hu buses %u - %u\n",
-		       i, (unsigned long)cfg->address, cfg->pci_segment,
-		       (unsigned int)cfg->start_bus_number,
-		       (unsigned int)cfg->end_bus_number);
 
 		if (!early && !acpi_disabled)
-			valid = is_mmconf_reserved(is_acpi_reserved, addr, size, i, cfg, 0);
+			valid = is_mmconf_reserved(is_acpi_reserved, cfg, 0);
 
 		if (valid)
 			continue;
 
 		if (!early)
-			printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
-			       " reserved in ACPI motherboard resources\n",
-			       cfg->address);
+			printk(KERN_ERR FW_BUG PREFIX
+			       "MMCONFIG at %pR not reserved in "
+			       "ACPI motherboard resources\n", &cfg->res);
 
 		/* Don't try to do this check unless configuration
 		   type 1 is available. how about type 2 ?*/
 		if (raw_pci_ops)
-			valid = is_mmconf_reserved(e820_all_mapped, addr, size, i, cfg, 1);
+			valid = is_mmconf_reserved(e820_all_mapped, cfg, 1);
 
 		if (!valid)
 			goto reject;
@@ -518,34 +510,41 @@ static void __init pci_mmcfg_reject_broken(int early)
 	return;
 
 reject:
-	printk(KERN_INFO "PCI: Not using MMCONFIG.\n");
-	pci_mmcfg_arch_free();
-	kfree(pci_mmcfg_config);
-	pci_mmcfg_config = NULL;
-	pci_mmcfg_config_num = 0;
+	printk(KERN_INFO PREFIX "not using MMCONFIG\n");
+	free_all_mmcfg();
 }
 
 static int __initdata known_bridge;
 
-static int acpi_mcfg_64bit_base_addr __initdata = FALSE;
+static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg,
+					struct acpi_mcfg_allocation *cfg)
+{
+	int year;
 
-/* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
-struct acpi_mcfg_allocation *pci_mmcfg_config;
-int pci_mmcfg_config_num;
+	if (cfg->address < 0xFFFFFFFF)
+		return 0;
 
-static int __init acpi_mcfg_oem_check(struct acpi_table_mcfg *mcfg)
-{
 	if (!strcmp(mcfg->header.oem_id, "SGI"))
-		acpi_mcfg_64bit_base_addr = TRUE;
+		return 0;
 
-	return 0;
+	if (mcfg->header.revision >= 1) {
+		if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) &&
+		    year >= 2010)
+			return 0;
+	}
+
+	printk(KERN_ERR PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx "
+	       "is above 4GB, ignored\n", cfg->pci_segment,
+	       cfg->start_bus_number, cfg->end_bus_number, cfg->address);
+	return -EINVAL;
 }
 
 static int __init pci_parse_mcfg(struct acpi_table_header *header)
 {
 	struct acpi_table_mcfg *mcfg;
+	struct acpi_mcfg_allocation *cfg_table, *cfg;
 	unsigned long i;
-	int config_size;
+	int entries;
 
 	if (!header)
 		return -EINVAL;
@@ -553,38 +552,33 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
 	mcfg = (struct acpi_table_mcfg *)header;
 
 	/* how many config structures do we have */
-	pci_mmcfg_config_num = 0;
+	free_all_mmcfg();
+	entries = 0;
 	i = header->length - sizeof(struct acpi_table_mcfg);
 	while (i >= sizeof(struct acpi_mcfg_allocation)) {
-		++pci_mmcfg_config_num;
+		entries++;
 		i -= sizeof(struct acpi_mcfg_allocation);
 	};
-	if (pci_mmcfg_config_num == 0) {
+	if (entries == 0) {
 		printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
 		return -ENODEV;
 	}
 
-	config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
-	pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
-	if (!pci_mmcfg_config) {
-		printk(KERN_WARNING PREFIX
-		       "No memory for MCFG config tables\n");
-		return -ENOMEM;
-	}
-
-	memcpy(pci_mmcfg_config, &mcfg[1], config_size);
-
-	acpi_mcfg_oem_check(mcfg);
-
-	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		if ((pci_mmcfg_config[i].address > 0xFFFFFFFF) &&
-		    !acpi_mcfg_64bit_base_addr) {
-			printk(KERN_ERR PREFIX
-			       "MMCONFIG not in low 4GB of memory\n");
-			kfree(pci_mmcfg_config);
-			pci_mmcfg_config_num = 0;
+	cfg_table = (struct acpi_mcfg_allocation *) &mcfg[1];
+	for (i = 0; i < entries; i++) {
+		cfg = &cfg_table[i];
+		if (acpi_mcfg_check_entry(mcfg, cfg)) {
+			free_all_mmcfg();
 			return -ENODEV;
 		}
+
+		if (pci_mmconfig_add(cfg->pci_segment, cfg->start_bus_number,
+				     cfg->end_bus_number, cfg->address) == NULL) {
+			printk(KERN_WARNING PREFIX
+			       "no memory for MCFG entries\n");
+			free_all_mmcfg();
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
@@ -614,9 +608,7 @@ static void __init __pci_mmcfg_init(int early)
 
 	pci_mmcfg_reject_broken(early);
 
-	if ((pci_mmcfg_config_num == 0) ||
-	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].address == 0))
+	if (list_empty(&pci_mmcfg_list))
 		return;
 
 	if (pci_mmcfg_arch_init())
@@ -648,9 +640,7 @@ static int __init pci_mmcfg_late_insert_resources(void)
 	 */
 	if ((pci_mmcfg_resources_inserted == 1) ||
 	    (pci_probe & PCI_PROBE_MMCONF) == 0 ||
-	    (pci_mmcfg_config_num == 0) ||
-	    (pci_mmcfg_config == NULL) ||
-	    (pci_mmcfg_config[0].address == 0))
+	    list_empty(&pci_mmcfg_list))
 		return 1;
 
 	/*
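
The hunks above lean on helpers this extract never shows: pci_mmcfg_list, pci_mmconfig_add() and free_all_mmcfg() come from the shared side of the series (arch/x86/pci/mmconfig-shared.c plus the asm/pci_x86.h hunk in the diffstat). A minimal sketch of what they plausibly look like, with field and constant names inferred from the usage above rather than shown by this diff:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <asm/pci_x86.h>

LIST_HEAD(pci_mmcfg_list);

static struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
						 int end, u64 addr)
{
	struct pci_mmcfg_region *new, *cfg;
	int num_buses = end - start + 1;

	if (addr == 0)
		return NULL;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->address = addr;
	new->segment = segment;
	new->start_bus = start;
	new->end_bus = end;

	/* precompute the resource so callers can just insert_resource() it */
	new->res.start = addr + PCI_MMCFG_BUS_OFFSET(start);
	new->res.end = new->res.start + PCI_MMCFG_BUS_OFFSET(num_buses) - 1;
	new->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	snprintf(new->name, PCI_MMCFG_RESOURCE_NAME_LEN,
		 "PCI MMCONFIG %04x [bus %02x-%02x]", segment, start, end);
	new->res.name = new->name;

	/* keep the list ordered by segment, then starting bus, so that
	 * pci_mmcfg_check_end_bus_number() can compare list neighbors */
	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (cfg->segment > segment ||
		    (cfg->segment == segment && cfg->start_bus >= start)) {
			list_add_tail(&new->list, &cfg->list);
			return new;
		}
	list_add_tail(&new->list, &pci_mmcfg_list);

	return new;
}

static void free_all_mmcfg(void)
{
	struct pci_mmcfg_region *cfg, *tmp;

	pci_mmcfg_arch_free();
	list_for_each_entry_safe(cfg, tmp, &pci_mmcfg_list, list) {
		list_del(&cfg->list);
		kfree(cfg);
	}
}
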
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index f10a7e94a84c..90d5fd476ed4 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -27,18 +27,10 @@ static int mmcfg_last_accessed_cpu;
  */
 static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
 {
-	struct acpi_mcfg_allocation *cfg;
-	int cfg_num;
-
-	for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
-		cfg = &pci_mmcfg_config[cfg_num];
-		if (cfg->pci_segment == seg &&
-		    (cfg->start_bus_number <= bus) &&
-		    (cfg->end_bus_number >= bus))
-			return cfg->address;
-	}
+	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
 
-	/* Fall back to type 0 */
+	if (cfg)
+		return cfg->address;
 	return 0;
 }
 
@@ -47,7 +39,7 @@ static u32 get_base_addr(unsigned int seg, int bus, unsigned devfn)
  */
 static void pci_exp_set_dev_base(unsigned int base, int bus, int devfn)
 {
-	u32 dev_base = base | (bus << 20) | (devfn << 12);
+	u32 dev_base = base | PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12);
 	int cpu = smp_processor_id();
 	if (dev_base != mmcfg_last_accessed_device ||
 	    cpu != mmcfg_last_accessed_cpu) {
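
PCI_MMCFG_BUS_OFFSET() and pci_mmconfig_lookup() used here are likewise supplied by the shared code. Given that each bus owns 1 MB of MMCONFIG space (32 devices x 8 functions x 4 KB per function), they presumably reduce to something like this sketch:

/* 1 MB of configuration space per bus */
#define PCI_MMCFG_BUS_OFFSET(bus)	((bus) << 20)

/* Find the region covering (segment, bus); NULL means the caller
 * falls back to legacy type 1 accesses. */
struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus)
{
	struct pci_mmcfg_region *cfg;

	list_for_each_entry(cfg, &pci_mmcfg_list, list)
		if (cfg->segment == segment &&
		    cfg->start_bus <= bus && bus <= cfg->end_bus)
			return cfg;

	return NULL;
}
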
diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
index 94349f8b2f96..e783841bd1d7 100644
--- a/arch/x86/pci/mmconfig_64.c
+++ b/arch/x86/pci/mmconfig_64.c
@@ -12,38 +12,15 @@
 #include <asm/e820.h>
 #include <asm/pci_x86.h>
 
-/* Static virtual mapping of the MMCONFIG aperture */
-struct mmcfg_virt {
-	struct acpi_mcfg_allocation *cfg;
-	char __iomem *virt;
-};
-static struct mmcfg_virt *pci_mmcfg_virt;
-
-static char __iomem *get_virt(unsigned int seg, unsigned bus)
-{
-	struct acpi_mcfg_allocation *cfg;
-	int cfg_num;
-
-	for (cfg_num = 0; cfg_num < pci_mmcfg_config_num; cfg_num++) {
-		cfg = pci_mmcfg_virt[cfg_num].cfg;
-		if (cfg->pci_segment == seg &&
-		    (cfg->start_bus_number <= bus) &&
-		    (cfg->end_bus_number >= bus))
-			return pci_mmcfg_virt[cfg_num].virt;
-	}
-
-	/* Fall back to type 0 */
-	return NULL;
-}
+#define PREFIX "PCI: "
 
 static char __iomem *pci_dev_base(unsigned int seg, unsigned int bus, unsigned int devfn)
 {
-	char __iomem *addr;
+	struct pci_mmcfg_region *cfg = pci_mmconfig_lookup(seg, bus);
 
-	addr = get_virt(seg, bus);
-	if (!addr)
+	if (cfg && cfg->virt)
+		return cfg->virt + (PCI_MMCFG_BUS_OFFSET(bus) | (devfn << 12));
 	return NULL;
-	return addr + ((bus << 20) | (devfn << 12));
 }
 
 static int pci_mmcfg_read(unsigned int seg, unsigned int bus,
@@ -109,42 +86,30 @@ static struct pci_raw_ops pci_mmcfg = {
 	.write = pci_mmcfg_write,
 };
 
-static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
+static void __iomem * __init mcfg_ioremap(struct pci_mmcfg_region *cfg)
 {
 	void __iomem *addr;
 	u64 start, size;
+	int num_buses;
 
-	start = cfg->start_bus_number;
-	start <<= 20;
-	start += cfg->address;
-	size = cfg->end_bus_number + 1 - cfg->start_bus_number;
-	size <<= 20;
+	start = cfg->address + PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
+	num_buses = cfg->end_bus - cfg->start_bus + 1;
+	size = PCI_MMCFG_BUS_OFFSET(num_buses);
 	addr = ioremap_nocache(start, size);
-	if (addr) {
-		printk(KERN_INFO "PCI: Using MMCONFIG at %Lx - %Lx\n",
-		       start, start + size - 1);
-		addr -= cfg->start_bus_number << 20;
-	}
+	if (addr)
+		addr -= PCI_MMCFG_BUS_OFFSET(cfg->start_bus);
 	return addr;
 }
 
 int __init pci_mmcfg_arch_init(void)
 {
-	int i;
-	pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) *
-				 pci_mmcfg_config_num, GFP_KERNEL);
-	if (pci_mmcfg_virt == NULL) {
-		printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
-		return 0;
-	}
+	struct pci_mmcfg_region *cfg;
 
-	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		pci_mmcfg_virt[i].cfg = &pci_mmcfg_config[i];
-		pci_mmcfg_virt[i].virt = mcfg_ioremap(&pci_mmcfg_config[i]);
-		if (!pci_mmcfg_virt[i].virt) {
-			printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
-					"segment %d\n",
-				pci_mmcfg_config[i].pci_segment);
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		cfg->virt = mcfg_ioremap(cfg);
+		if (!cfg->virt) {
+			printk(KERN_ERR PREFIX "can't map MMCONFIG at %pR\n",
+			       &cfg->res);
 			pci_mmcfg_arch_free();
 			return 0;
 		}
@@ -155,19 +120,12 @@ int __init pci_mmcfg_arch_init(void)
 
 void __init pci_mmcfg_arch_free(void)
 {
-	int i;
-
-	if (pci_mmcfg_virt == NULL)
-		return;
+	struct pci_mmcfg_region *cfg;
 
-	for (i = 0; i < pci_mmcfg_config_num; ++i) {
-		if (pci_mmcfg_virt[i].virt) {
-			iounmap(pci_mmcfg_virt[i].virt + (pci_mmcfg_virt[i].cfg->start_bus_number << 20));
-			pci_mmcfg_virt[i].virt = NULL;
-			pci_mmcfg_virt[i].cfg = NULL;
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		if (cfg->virt) {
+			iounmap(cfg->virt + PCI_MMCFG_BUS_OFFSET(cfg->start_bus));
+			cfg->virt = NULL;
 		}
 	}
-
-	kfree(pci_mmcfg_virt);
-	pci_mmcfg_virt = NULL;
 }
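
Worth spelling out the trick in mcfg_ioremap(): only the [start_bus, end_bus] slice is mapped, but the returned cookie is biased downward by PCI_MMCFG_BUS_OFFSET(start_bus), so pci_dev_base() and iounmap() can index it with absolute bus numbers. A worked example with made-up values:

/*
 * Assume a region with address = 0xe0000000 covering buses 0x20-0x3f.
 *
 *   start = 0xe0000000 + (0x20 << 20) = 0xe2000000;   32 buses -> 32 MB
 *   virt  = ioremap_nocache(start, 32 << 20) - (0x20 << 20);
 *
 * For bus 0x25, device 3, function 1 (devfn = PCI_DEVFN(3, 1) = 0x19):
 *
 *   cfg->virt + PCI_MMCFG_BUS_OFFSET(0x25) + (0x19 << 12)
 *
 * lands 5 MB + 100 KB into the real mapping -- no per-access
 * subtraction of start_bus is needed, and buses below 0x20 simply
 * fail the pci_mmconfig_lookup() range check before ever getting here.
 */
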
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index b8e45f164e2a..2b26dd5930c6 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -27,7 +27,9 @@
 #include <linux/page-flags.h>
 #include <linux/highmem.h>
 #include <linux/console.h>
+#include <linux/pci.h>
 
+#include <xen/xen.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/version.h>
 #include <xen/interface/physdev.h>
@@ -1175,7 +1177,11 @@ asmlinkage void __init xen_start_kernel(void)
 		add_preferred_console("xenboot", 0, NULL);
 		add_preferred_console("tty", 0, NULL);
 		add_preferred_console("hvc", 0, NULL);
+	} else {
+		/* Make sure ACS will be enabled */
+		pci_request_acs();
 	}
+
 
 	xen_raw_console_write("about to get started...\n");
 
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 05cebf8f62b1..4352dbe1186a 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -13,8 +13,6 @@ struct sigaction;
 asmlinkage long xtensa_execve(char*, char**, char**, struct pt_regs*);
 asmlinkage long xtensa_clone(unsigned long, unsigned long, struct pt_regs*);
 asmlinkage long xtensa_pipe(int __user *);
-asmlinkage long xtensa_mmap2(unsigned long, unsigned long, unsigned long,
-			     unsigned long, unsigned long, unsigned long);
 asmlinkage long xtensa_ptrace(long, long, long, long);
 asmlinkage long xtensa_sigreturn(struct pt_regs*);
 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index 4e55dc763021..fbf318b3af3e 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -189,7 +189,7 @@ __SYSCALL( 79, sys_fremovexattr, 2)
 /* File Map / Shared Memory Operations */
 
 #define __NR_mmap2 80
-__SYSCALL( 80, xtensa_mmap2, 6)
+__SYSCALL( 80, sys_mmap_pgoff, 6)
 #define __NR_munmap 81
 __SYSCALL( 81, sys_munmap, 2)
 #define __NR_mprotect 82
diff --git a/arch/xtensa/kernel/syscall.c b/arch/xtensa/kernel/syscall.c
index ac15ecbdf919..1e67bab775c1 100644
--- a/arch/xtensa/kernel/syscall.c
+++ b/arch/xtensa/kernel/syscall.c
@@ -57,31 +57,6 @@ asmlinkage long xtensa_pipe(int __user *userfds)
 	return error;
 }
 
-
-asmlinkage long xtensa_mmap2(unsigned long addr, unsigned long len,
-			     unsigned long prot, unsigned long flags,
-			     unsigned long fd, unsigned long pgoff)
-{
-	int error = -EBADF;
-	struct file * file = NULL;
-
-	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-	if (!(flags & MAP_ANONYMOUS)) {
-		file = fget(fd);
-		if (!file)
-			goto out;
-	}
-
-	down_write(&current->mm->mmap_sem);
-	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-	up_write(&current->mm->mmap_sem);
-
-	if (file)
-		fput(file);
-out:
-	return error;
-}
-
 asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
 {
 	unsigned long ret;
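
The xtensa wrapper deleted above is one of many near-identical per-arch mmap2 wrappers this series removes (see the arch/ sweep in the diffstat). The common sys_mmap_pgoff it points the syscall table at presumably carries the same body once, along these lines (a sketch of the consolidated helper, not code shown in this extract):

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	/* these flags are meaningful only at exec/open time */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

	down_write(&current->mm->mmap_sem);
	retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return retval;
}
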
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 7702118509a0..c7b10b4298e9 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,6 +19,7 @@ obj-y += acpi.o \
 
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y += osl.o utils.o reboot.o
+acpi-y += hest.o
 
 # sleep related files
 acpi-y += wakeup.o
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
new file mode 100644
index 000000000000..4bb18c980ac6
--- /dev/null
+++ b/drivers/acpi/hest.c
@@ -0,0 +1,135 @@
+#include <linux/acpi.h>
+#include <linux/pci.h>
+
+#define PREFIX "ACPI: "
+
+static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
+{
+	return sizeof(*p) +
+		(sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
+}
+
+static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
+{
+	return sizeof(*p) +
+		(sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
+}
+
+static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
+{
+	return sizeof(*p);
+}
+
+static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
+{
+	return sizeof(*p);
+}
+
+static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
+{
+	return (0 == pci_domain_nr(pci->bus) &&
+		p->bus == pci->bus->number &&
+		p->device == PCI_SLOT(pci->devfn) &&
+		p->function == PCI_FUNC(pci->devfn));
+}
+
+static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
+{
+	struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
+	unsigned long rc = 0;
+	u8 pcie_type = 0;
+	u8 bridge = 0;
+	switch (type) {
+	case ACPI_HEST_TYPE_AER_ROOT_PORT:
+		rc = sizeof(struct acpi_hest_aer_root);
+		pcie_type = PCI_EXP_TYPE_ROOT_PORT;
+		break;
+	case ACPI_HEST_TYPE_AER_ENDPOINT:
+		rc = sizeof(struct acpi_hest_aer);
+		pcie_type = PCI_EXP_TYPE_ENDPOINT;
+		break;
+	case ACPI_HEST_TYPE_AER_BRIDGE:
+		rc = sizeof(struct acpi_hest_aer_bridge);
+		if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+			bridge = 1;
+		break;
+	}
+
+	if (p->flags & ACPI_HEST_GLOBAL) {
+		if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
+			*firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+	}
+	else
+		if (hest_match_pci(p, pci))
+			*firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
+	return rc;
+}
+
+static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
+{
+	struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
+	void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
+	struct acpi_hest_header *hdr = p;
+
+	int i;
+	int firmware_first = 0;
+	static unsigned char printed_unused = 0;
+	static unsigned char printed_reserved = 0;
+
+	for (i = 0, hdr = p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
+		switch (hdr->type) {
+		case ACPI_HEST_TYPE_IA32_CHECK:
+			p += parse_acpi_hest_ia_machine_check(p);
+			break;
+		case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
+			p += parse_acpi_hest_ia_corrected(p);
+			break;
+		case ACPI_HEST_TYPE_IA32_NMI:
+			p += parse_acpi_hest_ia_nmi(p);
+			break;
+		/* These three should never appear */
+		case ACPI_HEST_TYPE_NOT_USED3:
+		case ACPI_HEST_TYPE_NOT_USED4:
+		case ACPI_HEST_TYPE_NOT_USED5:
+			if (!printed_unused) {
+				printk(KERN_DEBUG PREFIX
+				       "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
+				printed_unused = 1;
+			}
+			break;
+		case ACPI_HEST_TYPE_AER_ROOT_PORT:
+		case ACPI_HEST_TYPE_AER_ENDPOINT:
+		case ACPI_HEST_TYPE_AER_BRIDGE:
+			p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
+			break;
+		case ACPI_HEST_TYPE_GENERIC_ERROR:
+			p += parse_acpi_hest_generic(p);
+			break;
+		/* These should never appear either */
+		case ACPI_HEST_TYPE_RESERVED:
+		default:
+			if (!printed_reserved) {
+				printk(KERN_DEBUG PREFIX
+				       "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
+				printed_reserved = 1;
+			}
+			break;
+		}
+	}
+	return firmware_first;
+}
+
+int acpi_hest_firmware_first_pci(struct pci_dev *pci)
+{
+	acpi_status status = AE_NOT_FOUND;
+	struct acpi_table_header *hest = NULL;
+	status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
+
+	if (ACPI_SUCCESS(status)) {
+		if (acpi_hest_firmware_first(hest, pci)) {
+			return 1;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
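
acpi_hest_firmware_first_pci() is exported so PCIe error-handling code can defer to the platform when the HEST marks a device "firmware first". A hypothetical caller (the function name and placement are illustrative, not part of this patch):

/* Example only: skip native AER setup when firmware owns error
 * handling for this device. */
static int aer_service_init_example(struct pci_dev *dev)
{
	if (acpi_hest_firmware_first_pci(dev)) {
		dev_info(&dev->dev,
			 "PCIe errors handled by platform firmware\n");
		return -ENODEV;		/* leave the AER registers alone */
	}

	/* ... native AER initialization ... */
	return 0;
}
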
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b8578bb3f4c9..05a31e55d278 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -42,6 +42,7 @@
 #include <linux/module.h>
 #include <linux/scatterlist.h>
 
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/grant_table.h>
 #include <xen/events.h>
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 44bc8bbabf54..4d2905996751 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -1066,7 +1066,7 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
 		return 0;
 
 	spin_lock_irq(&data->txlock);
-	if (!(interface_to_usbdev(intf)->auto_pm && data->tx_in_flight)) {
+	if (!((message.event & PM_EVENT_AUTO) && data->tx_in_flight)) {
 		set_bit(BTUSB_SUSPENDING, &data->flags);
 		spin_unlock_irq(&data->txlock);
 	} else {
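
This btusb change belongs to the USB power-management rework visible in the diffstat (Documentation/usb/power-management.txt): a driver should tell autosuspend apart from system sleep by testing the pm_message_t it is handed, not by peeking at udev->auto_pm state that may already have changed. Schematically, with struct example_data standing in for the driver's private data:

static int example_usb_suspend(struct usb_interface *intf, pm_message_t msg)
{
	struct example_data *data = usb_get_intfdata(intf);

	/* Runtime autosuspend may be refused while traffic is in
	 * flight; a system sleep request must always be honored. */
	if ((msg.event & PM_EVENT_AUTO) && data->tx_in_flight)
		return -EBUSY;

	/* quiesce the device for either kind of suspend */
	return 0;
}
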
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 3cb56a049e24..30c36ac2cd00 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -36,10 +36,10 @@
 #define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
-#define PCI_DEVICE_ID_INTEL_IGDGM_HB        0xA010
-#define PCI_DEVICE_ID_INTEL_IGDGM_IG        0xA011
-#define PCI_DEVICE_ID_INTEL_IGDG_HB         0xA000
-#define PCI_DEVICE_ID_INTEL_IGDG_IG         0xA001
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB   0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG   0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB     0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG     0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
@@ -50,20 +50,20 @@
 #define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
-#define PCI_DEVICE_ID_INTEL_IGD_E_HB        0x2E00
-#define PCI_DEVICE_ID_INTEL_IGD_E_IG        0x2E02
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB    0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG    0x2E02
 #define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
 #define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
 #define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
 #define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB      0x0040
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG      0x0042
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB      0x0044
-#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB     0x0062
-#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB    0x006a
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG      0x0046
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB   0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG   0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB   0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB  0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG   0x0046
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -83,22 +83,22 @@
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
-#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
 
 extern int agp_memory_reserved;
 
@@ -178,6 +178,7 @@ static struct _intel_private {
 	 * popup and for the GTT.
 	 */
 	int gtt_entries;			/* i830+ */
+	int gtt_total_size;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
@@ -653,7 +654,7 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33 && !IS_IGD) {
+	} else if (IS_G33 && !IS_PINEVIEW) {
 	/* G33's GTT size defined in gmch_ctrl */
 		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
 		case G33_PGETBL_SIZE_1M:
@@ -669,7 +670,7 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4;
-	} else if (IS_G4X || IS_IGD) {
+	} else if (IS_G4X || IS_PINEVIEW) {
 		/* On 4 series hardware, GTT stolen is separate from graphics
 		 * stolen, ignore it in stolen gtt entries counting. However,
 		 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -1153,7 +1154,7 @@ static int intel_i915_configure(void)
 	readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
 
 	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
 			writel(agp_bridge->scratch_page, intel_private.gtt+i);
 		}
 		readl(intel_private.gtt+i-1);	/* PCI Posting. */
@@ -1308,6 +1309,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	if (!intel_private.gtt)
 		return -ENOMEM;
 
+	intel_private.gtt_total_size = gtt_map_size / 4;
+
 	temp &= 0xfff80000;
 
 	intel_private.registers = ioremap(temp, 128 * 4096);
@@ -1352,15 +1355,15 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 {
 	switch (agp_bridge->dev->device) {
 	case PCI_DEVICE_ID_INTEL_GM45_HB:
-	case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
 	case PCI_DEVICE_ID_INTEL_Q45_HB:
 	case PCI_DEVICE_ID_INTEL_G45_HB:
 	case PCI_DEVICE_ID_INTEL_G41_HB:
 	case PCI_DEVICE_ID_INTEL_B43_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -1395,6 +1398,8 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 	if (!intel_private.gtt)
 		return -ENOMEM;
 
+	intel_private.gtt_total_size = gtt_size / 4;
+
 	intel_private.registers = ioremap(temp, 128 * 4096);
 	if (!intel_private.registers) {
 		iounmap(intel_private.gtt);
@@ -2340,14 +2345,14 @@ static const struct intel_driver_description {
 	  NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 	  NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
 	  NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
 	  NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
-	  "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
-	  "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+	  "GM45", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+	  "Eaglelake", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
 	  "Q45/Q43", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
@@ -2356,14 +2361,14 @@ static const struct intel_driver_description {
2356 "B43", NULL, &intel_i965_driver }, 2361 "B43", NULL, &intel_i965_driver },
2357 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, 2362 { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
2358 "G41", NULL, &intel_i965_driver }, 2363 "G41", NULL, &intel_i965_driver },
2359 { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, 2364 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
2360 "IGDNG/D", NULL, &intel_i965_driver }, 2365 "Ironlake/D", NULL, &intel_i965_driver },
2361 { PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, 2366 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
2362 "IGDNG/M", NULL, &intel_i965_driver }, 2367 "Ironlake/M", NULL, &intel_i965_driver },
2363 { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, 2368 { PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
2364 "IGDNG/MA", NULL, &intel_i965_driver }, 2369 "Ironlake/MA", NULL, &intel_i965_driver },
2365 { PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0, 2370 { PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
2366 "IGDNG/MC2", NULL, &intel_i965_driver }, 2371 "Ironlake/MC2", NULL, &intel_i965_driver },
2367 { 0, 0, 0, NULL, NULL, NULL } 2372 { 0, 0, 0, NULL, NULL, NULL }
2368}; 2373};
2369 2374
@@ -2545,8 +2550,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
2545 ID(PCI_DEVICE_ID_INTEL_82945G_HB), 2550 ID(PCI_DEVICE_ID_INTEL_82945G_HB),
2546 ID(PCI_DEVICE_ID_INTEL_82945GM_HB), 2551 ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
2547 ID(PCI_DEVICE_ID_INTEL_82945GME_HB), 2552 ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
2548 ID(PCI_DEVICE_ID_INTEL_IGDGM_HB), 2553 ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
2549 ID(PCI_DEVICE_ID_INTEL_IGDG_HB), 2554 ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
2550 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB), 2555 ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
2551 ID(PCI_DEVICE_ID_INTEL_82G35_HB), 2556 ID(PCI_DEVICE_ID_INTEL_82G35_HB),
2552 ID(PCI_DEVICE_ID_INTEL_82965Q_HB), 2557 ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
@@ -2557,15 +2562,15 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_Q35_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q33_HB),
 	ID(PCI_DEVICE_ID_INTEL_GM45_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+	ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
 	{ }
 };
 
diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
index a6ee32b599a8..b1a71638c772 100644
--- a/drivers/char/hvc_xen.c
+++ b/drivers/char/hvc_xen.c
@@ -25,6 +25,8 @@
25#include <linux/types.h> 25#include <linux/types.h>
26 26
27#include <asm/xen/hypervisor.h> 27#include <asm/xen/hypervisor.h>
28
29#include <xen/xen.h>
28#include <xen/page.h> 30#include <xen/page.h>
29#include <xen/events.h> 31#include <xen/events.h>
30#include <xen/interface/io/console.h> 32#include <xen/interface/io/console.h>
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index a38831c82649..5fdd6daa40ea 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -19,26 +19,48 @@ static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES];
 static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES];
 
 /*
- * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. The table below is only
- * for DDR2 DRAM mapping.
+ * Address to DRAM bank mapping: see F2x80 for K8 and F2x[1,0]80 for Fam10 and
+ * later.
  */
-u32 revf_quad_ddr2_shift[] = {
-	0,	/* 0000b NULL DIMM (128mb) */
-	28,	/* 0001b 256mb */
-	29,	/* 0010b 512mb */
-	29,	/* 0011b 512mb */
-	29,	/* 0100b 512mb */
-	30,	/* 0101b 1gb */
-	30,	/* 0110b 1gb */
-	31,	/* 0111b 2gb */
-	31,	/* 1000b 2gb */
-	32,	/* 1001b 4gb */
-	32,	/* 1010b 4gb */
-	33,	/* 1011b 8gb */
-	0,	/* 1100b future */
-	0,	/* 1101b future */
-	0,	/* 1110b future */
-	0	/* 1111b future */
+static int ddr2_dbam_revCG[] = {
+	[0]		= 32,
+	[1]		= 64,
+	[2]		= 128,
+	[3]		= 256,
+	[4]		= 512,
+	[5]		= 1024,
+	[6]		= 2048,
+};
+
+static int ddr2_dbam_revD[] = {
+	[0]		= 32,
+	[1]		= 64,
+	[2 ... 3]	= 128,
+	[4]		= 256,
+	[5]		= 512,
+	[6]		= 256,
+	[7]		= 512,
+	[8 ... 9]	= 1024,
+	[10]		= 2048,
+};
+
+static int ddr2_dbam[] = { [0]		= 128,
+			   [1]		= 256,
+			   [2 ... 4]	= 512,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
+};
+
+static int ddr3_dbam[] = { [0]		= -1,
+			   [1]		= 256,
+			   [2]		= 512,
+			   [3 ... 4]	= -1,
+			   [5 ... 6]	= 1024,
+			   [7 ... 8]	= 2048,
+			   [9 ... 10]	= 4096,
+			   [11]		= 8192,
 };
 
 /*
@@ -164,11 +186,9 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 {
 	struct amd64_pvt *pvt = mci->pvt_info;
 	u32 scrubval = 0;
-	int status = -1, i, ret = 0;
+	int status = -1, i;
 
-	ret = pci_read_config_dword(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
-	if (ret)
-		debugf0("Reading K8_SCRCTRL failed\n");
+	amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_SCRCTRL, &scrubval);
 
 	scrubval = scrubval & 0x001F;
 
@@ -189,7 +209,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw)
 /* Map from a CSROW entry to the mask entry that operates on it */
 static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow)
 {
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F)
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F)
 		return csrow;
 	else
 		return csrow >> 1;
@@ -437,7 +457,7 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
 	u64 base;
 
 	/* only revE and later have the DRAM Hole Address Register */
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_E) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
 		debugf1(" revision %d for node %d does not support DHAR\n",
 			pvt->ext_model, pvt->mc_node_id);
 		return 1;
@@ -743,21 +763,6 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
 	*input_addr_max = base | mask | pvt->dcs_mask_notused;
 }
 
-/*
- * Extract error address from MCA NB Address Low (section 3.6.4.5) and MCA NB
- * Address High (section 3.6.4.6) register values and return the result. Address
- * is located in the info structure (nbeah and nbeal), the encoding is device
- * specific.
- */
-static u64 extract_error_address(struct mem_ctl_info *mci,
-				 struct err_regs *info)
-{
-	struct amd64_pvt *pvt = mci->pvt_info;
-
-	return pvt->ops->get_error_address(mci, info);
-}
-
-
 /* Map the Error address to a PAGE and PAGE OFFSET. */
 static inline void error_address_to_page_and_offset(u64 error_address,
 						    u32 *page, u32 *offset)
@@ -787,7 +792,7 @@ static int sys_addr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr)
 	return csrow;
 }
 
-static int get_channel_from_ecc_syndrome(unsigned short syndrome);
+static int get_channel_from_ecc_syndrome(struct mem_ctl_info *, u16);
 
 static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 {
@@ -797,7 +802,7 @@ static void amd64_cpu_display_info(struct amd64_pvt *pvt)
 		edac_printk(KERN_DEBUG, EDAC_MC, "F10h CPU detected\n");
 	else if (boot_cpu_data.x86 == 0xf)
 		edac_printk(KERN_DEBUG, EDAC_MC, "%s detected\n",
-			    (pvt->ext_model >= OPTERON_CPU_REV_F) ?
+			    (pvt->ext_model >= K8_REV_F) ?
 			    "Rev F or later" : "Rev E or earlier");
 	else
 		/* we'll hardly ever ever get here */
@@ -813,7 +818,7 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 	int bit;
 	enum dev_type edac_cap = EDAC_FLAG_NONE;
 
-	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= OPTERON_CPU_REV_F)
+	bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
 		? 19
 		: 17;
 
@@ -824,111 +829,86 @@ static enum edac_type amd64_determine_edac_cap(struct amd64_pvt *pvt)
 }
 
 
-static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt,
-					 int ganged);
+static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt);
+
+static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+{
+	debugf1("F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
+
+	debugf1(" DIMM type: %sbuffered; all DIMMs support ECC: %s\n",
+		(dclr & BIT(16)) ? "un" : "",
+		(dclr & BIT(19)) ? "yes" : "no");
+
+	debugf1(" PAR/ERR parity: %s\n",
+		(dclr & BIT(8)) ? "enabled" : "disabled");
+
+	debugf1(" DCT 128bit mode width: %s\n",
+		(dclr & BIT(11)) ? "128b" : "64b");
+
+	debugf1(" x4 logical DIMMs present: L0: %s L1: %s L2: %s L3: %s\n",
+		(dclr & BIT(12)) ? "yes" : "no",
+		(dclr & BIT(13)) ? "yes" : "no",
+		(dclr & BIT(14)) ? "yes" : "no",
+		(dclr & BIT(15)) ? "yes" : "no");
+}
 
 /* Display and decode various NB registers for debug purposes. */
 static void amd64_dump_misc_regs(struct amd64_pvt *pvt)
 {
 	int ganged;
 
-	debugf1(" nbcap:0x%8.08x DctDualCap=%s DualNode=%s 8-Node=%s\n",
-		pvt->nbcap,
-		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_DUAL_NODE) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_8_NODE) ? "True" : "False");
-	debugf1(" ECC Capable=%s ChipKill Capable=%s\n",
-		(pvt->nbcap & K8_NBCAP_SECDED) ? "True" : "False",
-		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "True" : "False");
-	debugf1(" DramCfg0-low=0x%08x DIMM-ECC=%s Parity=%s Width=%s\n",
-		pvt->dclr0,
-		(pvt->dclr0 & BIT(19)) ? "Enabled" : "Disabled",
-		(pvt->dclr0 & BIT(8)) ? "Enabled" : "Disabled",
-		(pvt->dclr0 & BIT(11)) ? "128b" : "64b");
-	debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s DIMM Type=%s\n",
-		(pvt->dclr0 & BIT(12)) ? "Y" : "N",
-		(pvt->dclr0 & BIT(13)) ? "Y" : "N",
-		(pvt->dclr0 & BIT(14)) ? "Y" : "N",
-		(pvt->dclr0 & BIT(15)) ? "Y" : "N",
-		(pvt->dclr0 & BIT(16)) ? "UN-Buffered" : "Buffered");
-
-
-	debugf1(" online-spare: 0x%8.08x\n", pvt->online_spare);
+	debugf1("F3xE8 (NB Cap): 0x%08x\n", pvt->nbcap);
 
-	if (boot_cpu_data.x86 == 0xf) {
-		debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
-			pvt->dhar, dhar_base(pvt->dhar),
-			k8_dhar_offset(pvt->dhar));
-		debugf1(" DramHoleValid=%s\n",
-			(pvt->dhar & DHAR_VALID) ? "True" : "False");
+	debugf1(" NB two channel DRAM capable: %s\n",
+		(pvt->nbcap & K8_NBCAP_DCT_DUAL) ? "yes" : "no");
 
-		debugf1(" dbam-dkt: 0x%8.08x\n", pvt->dbam0);
+	debugf1(" ECC capable: %s, ChipKill ECC capable: %s\n",
+		(pvt->nbcap & K8_NBCAP_SECDED) ? "yes" : "no",
+		(pvt->nbcap & K8_NBCAP_CHIPKILL) ? "yes" : "no");
 
-		/* everything below this point is Fam10h and above */
-		return;
+	amd64_dump_dramcfg_low(pvt->dclr0, 0);
 
-	} else {
-		debugf1(" dhar: 0x%8.08x Base=0x%08x Offset=0x%08x\n",
-			pvt->dhar, dhar_base(pvt->dhar),
-			f10_dhar_offset(pvt->dhar));
-		debugf1(" DramMemHoistValid=%s DramHoleValid=%s\n",
-			(pvt->dhar & F10_DRAM_MEM_HOIST_VALID) ?
-			"True" : "False",
-			(pvt->dhar & DHAR_VALID) ?
-			"True" : "False");
-	}
+	debugf1("F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
 
-	/* Only if NOT ganged does dcl1 have valid info */
-	if (!dct_ganging_enabled(pvt)) {
-		debugf1(" DramCfg1-low=0x%08x DIMM-ECC=%s Parity=%s "
-			"Width=%s\n", pvt->dclr1,
-			(pvt->dclr1 & BIT(19)) ? "Enabled" : "Disabled",
-			(pvt->dclr1 & BIT(8)) ? "Enabled" : "Disabled",
-			(pvt->dclr1 & BIT(11)) ? "128b" : "64b");
-		debugf1(" DIMM x4 Present: L0=%s L1=%s L2=%s L3=%s "
-			"DIMM Type=%s\n",
-			(pvt->dclr1 & BIT(12)) ? "Y" : "N",
-			(pvt->dclr1 & BIT(13)) ? "Y" : "N",
-			(pvt->dclr1 & BIT(14)) ? "Y" : "N",
-			(pvt->dclr1 & BIT(15)) ? "Y" : "N",
-			(pvt->dclr1 & BIT(16)) ? "UN-Buffered" : "Buffered");
+	debugf1("F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, "
+		"offset: 0x%08x\n",
+		pvt->dhar,
+		dhar_base(pvt->dhar),
+		(boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt->dhar)
+					   : f10_dhar_offset(pvt->dhar));
+
+	debugf1(" DramHoleValid: %s\n",
+		(pvt->dhar & DHAR_VALID) ? "yes" : "no");
+
+	/* everything below this point is Fam10h and above */
+	if (boot_cpu_data.x86 == 0xf) {
+		amd64_debug_display_dimm_sizes(0, pvt);
+		return;
 	}
 
+	/* Only if NOT ganged does dclr1 have valid info */
+	if (!dct_ganging_enabled(pvt))
+		amd64_dump_dramcfg_low(pvt->dclr1, 1);
+
 	/*
 	 * Determine if ganged and then dump memory sizes for first controller,
 	 * and if NOT ganged dump info for 2nd controller.
 	 */
 	ganged = dct_ganging_enabled(pvt);
 
-	f10_debug_display_dimm_sizes(0, pvt, ganged);
+	amd64_debug_display_dimm_sizes(0, pvt);
 
 	if (!ganged)
-		f10_debug_display_dimm_sizes(1, pvt, ganged);
+		amd64_debug_display_dimm_sizes(1, pvt);
 }
 
 /* Read in both of DBAM registers */
 static void amd64_read_dbam_reg(struct amd64_pvt *pvt)
 {
-	int err = 0;
-	unsigned int reg;
-
-	reg = DBAM0;
-	err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam0);
-	if (err)
-		goto err_reg;
+	amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM0, &pvt->dbam0);
 
-	if (boot_cpu_data.x86 >= 0x10) {
-		reg = DBAM1;
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dbam1);
-
-		if (err)
-			goto err_reg;
-	}
-
-	return;
-
-err_reg:
-	debugf0("Error reading F2x%03x.\n", reg);
+	if (boot_cpu_data.x86 >= 0x10)
+		amd64_read_pci_cfg(pvt->dram_f2_ctl, DBAM1, &pvt->dbam1);
 }
 
 /*
@@ -963,7 +943,7 @@ err_reg:
 static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
 {
 
-	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) {
+	if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
 		pvt->dcsb_base = REV_E_DCSB_BASE_BITS;
 		pvt->dcsm_mask = REV_E_DCSM_MASK_BITS;
 		pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS;
@@ -991,28 +971,21 @@ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt)
  */
 static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 {
-	int cs, reg, err = 0;
+	int cs, reg;
 
 	amd64_set_dct_base_and_mask(pvt);
 
 	for (cs = 0; cs < pvt->cs_count; cs++) {
 		reg = K8_DCSB0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					    &pvt->dcsb0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSB0[%d] failed\n", cs);
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]))
 			debugf0(" DCSB0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsb0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's base */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSB1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						    &pvt->dcsb1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSB1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsb1[cs]))
 				debugf0(" DCSB1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsb1[cs], reg);
 		} else {
@@ -1022,26 +995,20 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt)
 
 	for (cs = 0; cs < pvt->num_dcsm; cs++) {
 		reg = K8_DCSM0 + (cs * 4);
-		err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-					    &pvt->dcsm0[cs]);
-		if (unlikely(err))
-			debugf0("Reading K8_DCSM0 failed\n");
-		else
+		if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg, &pvt->dcsm0[cs]))
 			debugf0(" DCSM0[%d]=0x%08x reg: F2x%x\n",
 				cs, pvt->dcsm0[cs], reg);
 
 		/* If DCT are NOT ganged, then read in DCT1's mask */
 		if (boot_cpu_data.x86 >= 0x10 && !dct_ganging_enabled(pvt)) {
 			reg = F10_DCSM1 + (cs * 4);
-			err = pci_read_config_dword(pvt->dram_f2_ctl, reg,
-						    &pvt->dcsm1[cs]);
-			if (unlikely(err))
-				debugf0("Reading F10_DCSM1[%d] failed\n", cs);
-			else
+			if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, reg,
+						&pvt->dcsm1[cs]))
 				debugf0(" DCSM1[%d]=0x%08x reg: F2x%x\n",
 					cs, pvt->dcsm1[cs], reg);
-		} else
+		} else {
 			pvt->dcsm1[cs] = 0;
+		}
 	}
 }
 
@@ -1049,18 +1016,16 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt)
 {
 	enum mem_type type;
 
-	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= OPTERON_CPU_REV_F) {
-		/* Rev F and later */
-		type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
+	if (boot_cpu_data.x86 >= 0x10 || pvt->ext_model >= K8_REV_F) {
+		if (pvt->dchr0 & DDR3_MODE)
+			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
+		else
+			type = (pvt->dclr0 & BIT(16)) ? MEM_DDR2 : MEM_RDDR2;
 	} else {
-		/* Rev E and earlier */
 		type = (pvt->dclr0 & BIT(18)) ? MEM_DDR : MEM_RDDR;
 	}
 
-	debugf1(" Memory type is: %s\n",
-		(type == MEM_DDR2) ? "MEM_DDR2" :
-		(type == MEM_RDDR2) ? "MEM_RDDR2" :
-		(type == MEM_DDR) ? "MEM_DDR" : "MEM_RDDR");
+	debugf1(" Memory type is: %s\n", edac_mem_types[type]);
 
 	return type;
 }
@@ -1078,11 +1043,11 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
1078{ 1043{
1079 int flag, err = 0; 1044 int flag, err = 0;
1080 1045
1081 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0); 1046 err = amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1082 if (err) 1047 if (err)
1083 return err; 1048 return err;
1084 1049
1085 if ((boot_cpu_data.x86_model >> 4) >= OPTERON_CPU_REV_F) { 1050 if ((boot_cpu_data.x86_model >> 4) >= K8_REV_F) {
1086 /* RevF (NPT) and later */ 1051 /* RevF (NPT) and later */
1087 flag = pvt->dclr0 & F10_WIDTH_128; 1052 flag = pvt->dclr0 & F10_WIDTH_128;
1088 } else { 1053 } else {
@@ -1114,22 +1079,15 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1114{ 1079{
1115 u32 low; 1080 u32 low;
1116 u32 off = dram << 3; /* 8 bytes between DRAM entries */ 1081 u32 off = dram << 3; /* 8 bytes between DRAM entries */
1117 int err;
1118 1082
1119 err = pci_read_config_dword(pvt->addr_f1_ctl, 1083 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_BASE_LOW + off, &low);
1120 K8_DRAM_BASE_LOW + off, &low);
1121 if (err)
1122 debugf0("Reading K8_DRAM_BASE_LOW failed\n");
1123 1084
1124 /* Extract parts into separate data entries */ 1085 /* Extract parts into separate data entries */
1125 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; 1086 pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8;
1126 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; 1087 pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7;
1127 pvt->dram_rw_en[dram] = (low & 0x3); 1088 pvt->dram_rw_en[dram] = (low & 0x3);
1128 1089
1129 err = pci_read_config_dword(pvt->addr_f1_ctl, 1090 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DRAM_LIMIT_LOW + off, &low);
1130 K8_DRAM_LIMIT_LOW + off, &low);
1131 if (err)
1132 debugf0("Reading K8_DRAM_LIMIT_LOW failed\n");
1133 1091
1134 /* 1092 /*
1135 * Extract parts into separate data entries. Limit is the HIGHEST memory 1093 * Extract parts into separate data entries. Limit is the HIGHEST memory
@@ -1142,7 +1100,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1142 1100
1143static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1101static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1144 struct err_regs *info, 1102 struct err_regs *info,
1145 u64 SystemAddress) 1103 u64 sys_addr)
1146{ 1104{
1147 struct mem_ctl_info *src_mci; 1105 struct mem_ctl_info *src_mci;
1148 unsigned short syndrome; 1106 unsigned short syndrome;
@@ -1155,7 +1113,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1155 1113
1156 /* CHIPKILL enabled */ 1114 /* CHIPKILL enabled */
1157 if (info->nbcfg & K8_NBCFG_CHIPKILL) { 1115 if (info->nbcfg & K8_NBCFG_CHIPKILL) {
1158 channel = get_channel_from_ecc_syndrome(syndrome); 1116 channel = get_channel_from_ecc_syndrome(mci, syndrome);
1159 if (channel < 0) { 1117 if (channel < 0) {
1160 /* 1118 /*
1161 * Syndrome didn't map, so we don't know which of the 1119 * Syndrome didn't map, so we don't know which of the
@@ -1177,64 +1135,46 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1177 * was obtained from email communication with someone at AMD. 1135 * was obtained from email communication with someone at AMD.
1178 * (Wish the email was placed in this comment - norsk) 1136 * (Wish the email was placed in this comment - norsk)
1179 */ 1137 */
1180 channel = ((SystemAddress & BIT(3)) != 0); 1138 channel = ((sys_addr & BIT(3)) != 0);
1181 } 1139 }
1182 1140
1183 /* 1141 /*
1184 * Find out which node the error address belongs to. This may be 1142 * Find out which node the error address belongs to. This may be
1185 * different from the node that detected the error. 1143 * different from the node that detected the error.
1186 */ 1144 */
1187 src_mci = find_mc_by_sys_addr(mci, SystemAddress); 1145 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1188 if (!src_mci) { 1146 if (!src_mci) {
1189 amd64_mc_printk(mci, KERN_ERR, 1147 amd64_mc_printk(mci, KERN_ERR,
1190 "failed to map error address 0x%lx to a node\n", 1148 "failed to map error address 0x%lx to a node\n",
1191 (unsigned long)SystemAddress); 1149 (unsigned long)sys_addr);
1192 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1150 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1193 return; 1151 return;
1194 } 1152 }
1195 1153
1196 /* Now map the SystemAddress to a CSROW */ 1154 /* Now map the sys_addr to a CSROW */
1197 csrow = sys_addr_to_csrow(src_mci, SystemAddress); 1155 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1198 if (csrow < 0) { 1156 if (csrow < 0) {
1199 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR); 1157 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1200 } else { 1158 } else {
1201 error_address_to_page_and_offset(SystemAddress, &page, &offset); 1159 error_address_to_page_and_offset(sys_addr, &page, &offset);
1202 1160
1203 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow, 1161 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1204 channel, EDAC_MOD_STR); 1162 channel, EDAC_MOD_STR);
1205 } 1163 }
1206} 1164}
1207 1165
1208/* 1166static int k8_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1209 * determine the number of PAGES for this DIMM's size based on its DRAM
1210 * Address Mapping.
1211 *
1212 * First step is to calculate the number of bits to shift a value of 1 left to
1213 * indicate how many pages. Start with the DBAM value as the starting bits,
1214 * then proceed to adjust those shift bits, based on CPU rev and the table.
1215 * See BKDG on the DBAM
1216 */
1217static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1218{ 1167{
1219 int nr_pages; 1168 int *dbam_map;
1220 1169
1221 if (pvt->ext_model >= OPTERON_CPU_REV_F) { 1170 if (pvt->ext_model >= K8_REV_F)
1222 nr_pages = 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT); 1171 dbam_map = ddr2_dbam;
1223 } else { 1172 else if (pvt->ext_model >= K8_REV_D)
1224 /* 1173 dbam_map = ddr2_dbam_revD;
1225 * RevE and less section; this line is tricky. It collapses the 1174 else
1226 * table used by RevD and later to one that matches revisions CG 1175 dbam_map = ddr2_dbam_revCG;
1227 * and earlier.
1228 */
1229 dram_map -= (pvt->ext_model >= OPTERON_CPU_REV_D) ?
1230 (dram_map > 8 ? 4 : (dram_map > 5 ?
1231 3 : (dram_map > 2 ? 1 : 0))) : 0;
1232
1233 /* a shift of 25 is the 32MiB minimum DIMM size in RevE and prior */
1234 nr_pages = 1 << (dram_map + 25 - PAGE_SHIFT);
1235 }
1236 1176
1237 return nr_pages; 1177 return dbam_map[cs_mode];
1238} 1178}
1239 1179
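k8_dbam_to_chip_select() replaces the old shift arithmetic with a straight table lookup: the 4-bit cs_mode value indexes an array of chip-select sizes in MB, one array per K8 revision group. The real ddr2_dbam* tables live in the header; the shape below is illustrative only:

/* illustrative shape of a dbam_to_cs table (real values are in
 * amd64_edac.h); index is the 4-bit cs_mode, entry is size in MB */
static int example_ddr2_dbam[] = {
	[0] = 128,  [1] = 256,  [2] = 512,   [3] = 512,
	[4] = 512,  [5] = 1024, [6] = 1024,  [7] = 2048,
	[8] = 2048, [9] = 4096, [10] = 4096, [11] = 8192,
};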
1240/* 1180/*
@@ -1248,34 +1188,24 @@ static int k8_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map)
1248static int f10_early_channel_count(struct amd64_pvt *pvt) 1188static int f10_early_channel_count(struct amd64_pvt *pvt)
1249{ 1189{
1250 int dbams[] = { DBAM0, DBAM1 }; 1190 int dbams[] = { DBAM0, DBAM1 };
1251 int err = 0, channels = 0; 1191 int i, j, channels = 0;
1252 int i, j;
1253 u32 dbam; 1192 u32 dbam;
1254 1193
1255 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
1256 if (err)
1257 goto err_reg;
1258
1259 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
1260 if (err)
1261 goto err_reg;
1262
1263 /* If we are in 128 bit mode, then we are using 2 channels */ 1194 /* If we are in 128 bit mode, then we are using 2 channels */
1264 if (pvt->dclr0 & F10_WIDTH_128) { 1195 if (pvt->dclr0 & F10_WIDTH_128) {
1265 debugf0("Data WIDTH is 128 bits - 2 channels\n");
1266 channels = 2; 1196 channels = 2;
1267 return channels; 1197 return channels;
1268 } 1198 }
1269 1199
1270 /* 1200 /*
1271 * Need to check if in UN-ganged mode: In such, there are 2 channels, 1201 * Need to check if in unganged mode: In such, there are 2 channels,
1272 * but they are NOT in 128 bit mode and thus the above 'dcl0' status bit 1202 * but they are not in 128 bit mode and thus the above 'dclr0' status
1273 * will be OFF. 1203 * bit will be OFF.
1274 * 1204 *
1275 * Need to check DCT0[0] and DCT1[0] to see if only one of them has 1205 * Need to check DCT0[0] and DCT1[0] to see if only one of them has
1276 * its CSEnable bit on. If so, then it's the SINGLE DIMM case. 1206 * its CSEnable bit on. If so, then it's the SINGLE DIMM case.
1277 */ 1207 */
1278 debugf0("Data WIDTH is NOT 128 bits - need more decoding\n"); 1208 debugf0("Data width is not 128 bits - need more decoding\n");
1279 1209
1280 /* 1210 /*
1281 * Check DRAM Bank Address Mapping values for each DIMM to see if there 1211 * Check DRAM Bank Address Mapping values for each DIMM to see if there
@@ -1283,8 +1213,7 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
1283 * both controllers since DIMMs can be placed in either one. 1213 * both controllers since DIMMs can be placed in either one.
1284 */ 1214 */
1285 for (i = 0; i < ARRAY_SIZE(dbams); i++) { 1215 for (i = 0; i < ARRAY_SIZE(dbams); i++) {
1286 err = pci_read_config_dword(pvt->dram_f2_ctl, dbams[i], &dbam); 1216 if (amd64_read_pci_cfg(pvt->dram_f2_ctl, dbams[i], &dbam))
1287 if (err)
1288 goto err_reg; 1217 goto err_reg;
1289 1218
1290 for (j = 0; j < 4; j++) { 1219 for (j = 0; j < 4; j++) {
@@ -1295,6 +1224,9 @@ static int f10_early_channel_count(struct amd64_pvt *pvt)
1295 } 1224 }
1296 } 1225 }
1297 1226
1227 if (channels > 2)
1228 channels = 2;
1229
1298 debugf0("MCT channel count: %d\n", channels); 1230 debugf0("MCT channel count: %d\n", channels);
1299 1231
1300 return channels; 1232 return channels;
@@ -1304,9 +1236,16 @@ err_reg:
1304 1236
1305} 1237}
1306 1238
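The loop above counts populated DIMMs per DCT by walking the four cs_mode fields of each DBAM register; DBAM_DIMM() is assumed to be a simple nibble extractor along these lines:

/* assumed semantics of DBAM_DIMM(): pull out the 4-bit cs_mode
 * field for DIMM i; nonzero means the slot is populated */
#define DBAM_DIMM(i, reg)	(((reg) >> ((i) * 4)) & 0xF)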
1307static int f10_dbam_map_to_pages(struct amd64_pvt *pvt, int dram_map) 1239static int f10_dbam_to_chip_select(struct amd64_pvt *pvt, int cs_mode)
1308{ 1240{
1309 return 1 << (revf_quad_ddr2_shift[dram_map] - PAGE_SHIFT); 1241 int *dbam_map;
1242
1243 if (pvt->dchr0 & DDR3_MODE || pvt->dchr1 & DDR3_MODE)
1244 dbam_map = ddr3_dbam;
1245 else
1246 dbam_map = ddr2_dbam;
1247
1248 return dbam_map[cs_mode];
1310} 1249}
1311 1250
1312/* Enable extended configuration access via 0xCF8 feature */ 1251/* Enable extended configuration access via 0xCF8 feature */
@@ -1314,7 +1253,7 @@ static void amd64_setup(struct amd64_pvt *pvt)
1314{ 1253{
1315 u32 reg; 1254 u32 reg;
1316 1255
1317 pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg); 1256 amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1318 1257
1319 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG); 1258 pvt->flags.cf8_extcfg = !!(reg & F10_NB_CFG_LOW_ENABLE_EXT_CFG);
1320 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG; 1259 reg |= F10_NB_CFG_LOW_ENABLE_EXT_CFG;
@@ -1326,7 +1265,7 @@ static void amd64_teardown(struct amd64_pvt *pvt)
1326{ 1265{
1327 u32 reg; 1266 u32 reg;
1328 1267
1329 pci_read_config_dword(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg); 1268 amd64_read_pci_cfg(pvt->misc_f3_ctl, F10_NB_CFG_HIGH, &reg);
1330 1269
1331 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG; 1270 reg &= ~F10_NB_CFG_LOW_ENABLE_EXT_CFG;
1332 if (pvt->flags.cf8_extcfg) 1271 if (pvt->flags.cf8_extcfg)
@@ -1355,10 +1294,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1355 high_offset = F10_DRAM_BASE_HIGH + (dram << 3); 1294 high_offset = F10_DRAM_BASE_HIGH + (dram << 3);
1356 1295
1357 /* read the 'raw' DRAM BASE Address register */ 1296 /* read the 'raw' DRAM BASE Address register */
1358 pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_base); 1297 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_base);
1359 1298
1360 /* Read from the ECS data register */ 1299 /* Read from the ECS data register */
1361 pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_base); 1300 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_base);
1362 1301
1363 /* Extract parts into separate data entries */ 1302 /* Extract parts into separate data entries */
1364 pvt->dram_rw_en[dram] = (low_base & 0x3); 1303 pvt->dram_rw_en[dram] = (low_base & 0x3);
@@ -1375,13 +1314,10 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1375 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); 1314 high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3);
1376 1315
1377 /* read the 'raw' LIMIT registers */ 1316 /* read the 'raw' LIMIT registers */
1378 pci_read_config_dword(pvt->addr_f1_ctl, low_offset, &low_limit); 1317 amd64_read_pci_cfg(pvt->addr_f1_ctl, low_offset, &low_limit);
1379 1318
1380 /* Read from the ECS data register for the HIGH portion */ 1319 /* Read from the ECS data register for the HIGH portion */
1381 pci_read_config_dword(pvt->addr_f1_ctl, high_offset, &high_limit); 1320 amd64_read_pci_cfg(pvt->addr_f1_ctl, high_offset, &high_limit);
1382
1383 debugf0(" HW Regs: BASE=0x%08x-%08x LIMIT= 0x%08x-%08x\n",
1384 high_base, low_base, high_limit, low_limit);
1385 1321
1386 pvt->dram_DstNode[dram] = (low_limit & 0x7); 1322 pvt->dram_DstNode[dram] = (low_limit & 0x7);
1387 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7; 1323 pvt->dram_IntlvSel[dram] = (low_limit >> 8) & 0x7;
@@ -1397,32 +1333,35 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram)
1397 1333
1398static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) 1334static void f10_read_dram_ctl_register(struct amd64_pvt *pvt)
1399{ 1335{
1400 int err = 0;
1401 1336
1402 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW, 1337 if (!amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_LOW,
1403 &pvt->dram_ctl_select_low); 1338 &pvt->dram_ctl_select_low)) {
1404 if (err) { 1339 debugf0("F2x110 (DCTL Sel. Low): 0x%08x, "
1405 debugf0("Reading F10_DCTL_SEL_LOW failed\n"); 1340 "High range addresses at: 0x%x\n",
1406 } else { 1341 pvt->dram_ctl_select_low,
1407 debugf0("DRAM_DCTL_SEL_LOW=0x%x DctSelBaseAddr=0x%x\n", 1342 dct_sel_baseaddr(pvt));
1408 pvt->dram_ctl_select_low, dct_sel_baseaddr(pvt)); 1343
1409 1344 debugf0(" DCT mode: %s, All DCTs on: %s\n",
1410 debugf0(" DRAM DCTs are=%s DRAM Is=%s DRAM-Ctl-" 1345 (dct_ganging_enabled(pvt) ? "ganged" : "unganged"),
1411 "sel-hi-range=%s\n", 1346 (dct_dram_enabled(pvt) ? "yes" : "no"));
1412 (dct_ganging_enabled(pvt) ? "GANGED" : "NOT GANGED"), 1347
1413 (dct_dram_enabled(pvt) ? "Enabled" : "Disabled"), 1348 if (!dct_ganging_enabled(pvt))
1414 (dct_high_range_enabled(pvt) ? "Enabled" : "Disabled")); 1349 debugf0(" Address range split per DCT: %s\n",
1415 1350 (dct_high_range_enabled(pvt) ? "yes" : "no"));
1416 debugf0(" DctDatIntLv=%s MemCleared=%s DctSelIntLvAddr=0x%x\n", 1351
1417 (dct_data_intlv_enabled(pvt) ? "Enabled" : "Disabled"), 1352 debugf0(" DCT data interleave for ECC: %s, "
1418 (dct_memory_cleared(pvt) ? "True " : "False "), 1353 "DRAM cleared since last warm reset: %s\n",
1354 (dct_data_intlv_enabled(pvt) ? "enabled" : "disabled"),
1355 (dct_memory_cleared(pvt) ? "yes" : "no"));
1356
1357 debugf0(" DCT channel interleave: %s, "
1358 "DCT interleave bits selector: 0x%x\n",
1359 (dct_interleave_enabled(pvt) ? "enabled" : "disabled"),
1419 dct_sel_interleave_addr(pvt)); 1360 dct_sel_interleave_addr(pvt));
1420 } 1361 }
1421 1362
1422 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH, 1363 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCTL_SEL_HIGH,
1423 &pvt->dram_ctl_select_high); 1364 &pvt->dram_ctl_select_high);
1424 if (err)
1425 debugf0("Reading F10_DCTL_SEL_HIGH failed\n");
1426} 1365}
1427 1366
1428/* 1367/*
@@ -1706,10 +1645,11 @@ static int f10_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
1706} 1645}
1707 1646
1708/* 1647/*
1709 * This is the F10h reference code from AMD to map a @sys_addr to NodeID, 1648 * For reference see "2.8.5 Routing DRAM Requests" in F10 BKDG. This code maps
1710 * CSROW, Channel. 1649 * a @sys_addr to NodeID, DCT (channel) and chip select (CSROW).
1711 * 1650 *
1712 * The @sys_addr is usually an error address received from the hardware. 1651 * The @sys_addr is usually an error address received from the hardware
1652 * (MCX_ADDR).
1713 */ 1653 */
1714static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci, 1654static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1715 struct err_regs *info, 1655 struct err_regs *info,
@@ -1722,133 +1662,76 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
1722 1662
1723 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1663 csrow = f10_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1724 1664
1725 if (csrow >= 0) { 1665 if (csrow < 0) {
1726 error_address_to_page_and_offset(sys_addr, &page, &offset);
1727
1728 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1729 syndrome |= LOW_SYNDROME(info->nbsh);
1730
1731 /*
1732 * Is CHIPKILL on? If so, then we can attempt to use the
1733 * syndrome to isolate which channel the error was on.
1734 */
1735 if (pvt->nbcfg & K8_NBCFG_CHIPKILL)
1736 chan = get_channel_from_ecc_syndrome(syndrome);
1737
1738 if (chan >= 0) {
1739 edac_mc_handle_ce(mci, page, offset, syndrome,
1740 csrow, chan, EDAC_MOD_STR);
1741 } else {
1742 /*
1743 * Channel unknown, report all channels on this
1744 * CSROW as failed.
1745 */
1746 for (chan = 0; chan < mci->csrows[csrow].nr_channels;
1747 chan++) {
1748 edac_mc_handle_ce(mci, page, offset,
1749 syndrome,
1750 csrow, chan,
1751 EDAC_MOD_STR);
1752 }
1753 }
1754
1755 } else {
1756 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1666 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1667 return;
1757 } 1668 }
1758}
1759 1669
1760/* 1670 error_address_to_page_and_offset(sys_addr, &page, &offset);
1761 * Input (@index) is the DBAM DIMM value (1 of 4) used as an index into a shift
1762 * table (revf_quad_ddr2_shift) which starts at 128MB DIMM size. Index of 0
1763 * indicates an empty DIMM slot, as reported by Hardware on empty slots.
1764 *
1765 * Normalize to 128MB by subtracting a 27-bit shift.
1766 */
1767static int map_dbam_to_csrow_size(int index)
1768{
1769 int mega_bytes = 0;
1770 1671
1771 if (index > 0 && index <= DBAM_MAX_VALUE) 1672 syndrome = HIGH_SYNDROME(info->nbsl) << 8;
1772 mega_bytes = ((128 << (revf_quad_ddr2_shift[index]-27))); 1673 syndrome |= LOW_SYNDROME(info->nbsh);
1773 1674
1774 return mega_bytes; 1675 /*
1676 * We need the syndromes for channel detection only when we're
1677 * ganged. Otherwise @chan should already contain the channel at
1678 * this point.
1679 */
1680 if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
1681 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1682
1683 if (chan >= 0)
1684 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan,
1685 EDAC_MOD_STR);
1686 else
1687 /*
1688 * Channel unknown, report all channels on this CSROW as failed.
1689 */
1690 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1691 edac_mc_handle_ce(mci, page, offset, syndrome,
1692 csrow, chan, EDAC_MOD_STR);
1775} 1693}
1776 1694
1777/* 1695/*
1778 * debug routine to display the memory sizes of a DIMM (ganged or not) and its 1696 * debug routine to display the memory sizes of all logical DIMMs and their
1779 * CSROWs as well 1697 * CSROWs as well
1780 */ 1698 */
1781static void f10_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt, 1699static void amd64_debug_display_dimm_sizes(int ctrl, struct amd64_pvt *pvt)
1782 int ganged)
1783{ 1700{
1784 int dimm, size0, size1; 1701 int dimm, size0, size1;
1785 u32 dbam; 1702 u32 dbam;
1786 u32 *dcsb; 1703 u32 *dcsb;
1787 1704
1788 debugf1(" dbam%d: 0x%8.08x CSROW is %s\n", ctrl, 1705 if (boot_cpu_data.x86 == 0xf) {
1789 ctrl ? pvt->dbam1 : pvt->dbam0, 1706 /* K8 families < revF not supported yet */
1790 ganged ? "GANGED - dbam1 not used" : "NON-GANGED"); 1707 if (pvt->ext_model < K8_REV_F)
1708 return;
1709 else
1710 WARN_ON(ctrl != 0);
1711 }
1712
1713 debugf1("F2x%d80 (DRAM Bank Address Mapping): 0x%08x\n",
1714 ctrl, ctrl ? pvt->dbam1 : pvt->dbam0);
1791 1715
1792 dbam = ctrl ? pvt->dbam1 : pvt->dbam0; 1716 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
1793 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0; 1717 dcsb = ctrl ? pvt->dcsb1 : pvt->dcsb0;
1794 1718
1719 edac_printk(KERN_DEBUG, EDAC_MC, "DCT%d chip selects:\n", ctrl);
1720
1795 /* Dump memory sizes for DIMM and its CSROWs */ 1721 /* Dump memory sizes for DIMM and its CSROWs */
1796 for (dimm = 0; dimm < 4; dimm++) { 1722 for (dimm = 0; dimm < 4; dimm++) {
1797 1723
1798 size0 = 0; 1724 size0 = 0;
1799 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE) 1725 if (dcsb[dimm*2] & K8_DCSB_CS_ENABLE)
1800 size0 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam)); 1726 size0 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1801 1727
1802 size1 = 0; 1728 size1 = 0;
1803 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE) 1729 if (dcsb[dimm*2 + 1] & K8_DCSB_CS_ENABLE)
1804 size1 = map_dbam_to_csrow_size(DBAM_DIMM(dimm, dbam)); 1730 size1 = pvt->ops->dbam_to_cs(pvt, DBAM_DIMM(dimm, dbam));
1805
1806 debugf1(" CTRL-%d DIMM-%d=%5dMB CSROW-%d=%5dMB "
1807 "CSROW-%d=%5dMB\n",
1808 ctrl,
1809 dimm,
1810 size0 + size1,
1811 dimm * 2,
1812 size0,
1813 dimm * 2 + 1,
1814 size1);
1815 }
1816}
1817
1818/*
1819 * Very early hardware probe on pci_probe thread to determine if this module
1820 * supports the hardware.
1821 *
1822 * Return:
1823 * 0 for OK
1824 * 1 for error
1825 */
1826static int f10_probe_valid_hardware(struct amd64_pvt *pvt)
1827{
1828 int ret = 0;
1829
1830 /*
1831 * If we are on a DDR3 machine, we don't know yet whether
1832 * we support it properly
1833 */
1834 if ((pvt->dchr0 & F10_DCHR_Ddr3Mode) ||
1835 (pvt->dchr1 & F10_DCHR_Ddr3Mode)) {
1836
1837 amd64_printk(KERN_WARNING,
1838 "%s() This machine is running with DDR3 memory. "
1839 "This is not currently supported. "
1840 "DCHR0=0x%x DCHR1=0x%x\n",
1841 __func__, pvt->dchr0, pvt->dchr1);
1842
1843 amd64_printk(KERN_WARNING,
1844 " Contact '%s' module MAINTAINER to help add"
1845 " support.\n",
1846 EDAC_MOD_STR);
1847
1848 ret = 1;
1849 1731
1732 edac_printk(KERN_DEBUG, EDAC_MC, " %d: %5dMB %d: %5dMB\n",
1733 dimm * 2, size0, dimm * 2 + 1, size1);
1850 } 1734 }
1851 return ret;
1852} 1735}
1853 1736
1854/* 1737/*
@@ -1868,11 +1751,11 @@ static struct amd64_family_type amd64_family_types[] = {
1868 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP, 1751 .addr_f1_ctl = PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
1869 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC, 1752 .misc_f3_ctl = PCI_DEVICE_ID_AMD_K8_NB_MISC,
1870 .ops = { 1753 .ops = {
1871 .early_channel_count = k8_early_channel_count, 1754 .early_channel_count = k8_early_channel_count,
1872 .get_error_address = k8_get_error_address, 1755 .get_error_address = k8_get_error_address,
1873 .read_dram_base_limit = k8_read_dram_base_limit, 1756 .read_dram_base_limit = k8_read_dram_base_limit,
1874 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow, 1757 .map_sysaddr_to_csrow = k8_map_sysaddr_to_csrow,
1875 .dbam_map_to_pages = k8_dbam_map_to_pages, 1758 .dbam_to_cs = k8_dbam_to_chip_select,
1876 } 1759 }
1877 }, 1760 },
1878 [F10_CPUS] = { 1761 [F10_CPUS] = {
@@ -1880,13 +1763,12 @@ static struct amd64_family_type amd64_family_types[] = {
1880 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP, 1763 .addr_f1_ctl = PCI_DEVICE_ID_AMD_10H_NB_MAP,
1881 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC, 1764 .misc_f3_ctl = PCI_DEVICE_ID_AMD_10H_NB_MISC,
1882 .ops = { 1765 .ops = {
1883 .probe_valid_hardware = f10_probe_valid_hardware, 1766 .early_channel_count = f10_early_channel_count,
1884 .early_channel_count = f10_early_channel_count, 1767 .get_error_address = f10_get_error_address,
1885 .get_error_address = f10_get_error_address, 1768 .read_dram_base_limit = f10_read_dram_base_limit,
1886 .read_dram_base_limit = f10_read_dram_base_limit, 1769 .read_dram_ctl_register = f10_read_dram_ctl_register,
1887 .read_dram_ctl_register = f10_read_dram_ctl_register, 1770 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1888 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, 1771 .dbam_to_cs = f10_dbam_to_chip_select,
1889 .dbam_map_to_pages = f10_dbam_map_to_pages,
1890 } 1772 }
1891 }, 1773 },
1892 [F11_CPUS] = { 1774 [F11_CPUS] = {
@@ -1894,13 +1776,12 @@ static struct amd64_family_type amd64_family_types[] = {
1894 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP, 1776 .addr_f1_ctl = PCI_DEVICE_ID_AMD_11H_NB_MAP,
1895 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC, 1777 .misc_f3_ctl = PCI_DEVICE_ID_AMD_11H_NB_MISC,
1896 .ops = { 1778 .ops = {
1897 .probe_valid_hardware = f10_probe_valid_hardware, 1779 .early_channel_count = f10_early_channel_count,
1898 .early_channel_count = f10_early_channel_count, 1780 .get_error_address = f10_get_error_address,
1899 .get_error_address = f10_get_error_address, 1781 .read_dram_base_limit = f10_read_dram_base_limit,
1900 .read_dram_base_limit = f10_read_dram_base_limit, 1782 .read_dram_ctl_register = f10_read_dram_ctl_register,
1901 .read_dram_ctl_register = f10_read_dram_ctl_register, 1783 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow,
1902 .map_sysaddr_to_csrow = f10_map_sysaddr_to_csrow, 1784 .dbam_to_cs = f10_dbam_to_chip_select,
1903 .dbam_map_to_pages = f10_dbam_map_to_pages,
1904 } 1785 }
1905 }, 1786 },
1906}; 1787};
@@ -1923,142 +1804,170 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
1923} 1804}
1924 1805
1925/* 1806/*
1926 * syndrome mapping table for ECC ChipKill devices 1807 * These are tables of eigenvectors (one per line) which can be used for the
1927 * 1808 * construction of the syndrome tables. The modified syndrome search algorithm
1928 * The comment in each row is the token (nibble) number that is in error. 1809 * uses those to find the symbol in error and thus the DIMM.
1929 * The least significant nibble of the syndrome is the mask for the bits
1930 * that are in error (need to be toggled) for the particular nibble.
1931 *
1932 * Each row contains 16 entries.
1933 * The first entry (0th) is the channel number for that row of syndromes.
1934 * The remaining 15 entries are the syndromes for the respective Error
1935 * bit mask index.
1936 *
1937 * 1st index entry is 0x0001 mask, indicating that the rightmost bit is the
1938 * bit in error.
1940 * The 2nd index entry is 0x0010, indicating that the second bit is damaged. 1855 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
1940 * The 3rd index entry is 0x0011 indicating that the rightmost 2 bits
1941 * are damaged.
1942 * Thus so on until index 15, 0x1111, whose entry has the syndrome
1943 * indicating that all 4 bits are damaged.
1944 *
1945 * A search is performed on this table looking for a given syndrome.
1946 * 1810 *
1947 * See the AMD documentation for ECC syndromes. This ECC table is valid 1811 * Algorithm courtesy of Ross LaFetra from AMD.
1948 * across all the versions of the AMD64 processors.
1949 *
1950 * A fast lookup is to use the LAST four bits of the 16-bit syndrome as a
1951 * COLUMN index, then search all ROWS of that column, looking for a match
1952 * with the input syndrome. The ROW value will be the token number.
1953 *
1954 * The 0'th entry on that row, can be returned as the CHANNEL (0 or 1) of this
1955 * error.
1956 */ 1812 */
1957#define NUMBER_ECC_ROWS 36 1813static u16 x4_vectors[] = {
1958static const unsigned short ecc_chipkill_syndromes[NUMBER_ECC_ROWS][16] = { 1814 0x2f57, 0x1afe, 0x66cc, 0xdd88,
1959 /* Channel 0 syndromes */ 1815 0x11eb, 0x3396, 0x7f4c, 0xeac8,
1960 {/*0*/ 0, 0xe821, 0x7c32, 0x9413, 0xbb44, 0x5365, 0xc776, 0x2f57, 1816 0x0001, 0x0002, 0x0004, 0x0008,
1961 0xdd88, 0x35a9, 0xa1ba, 0x499b, 0x66cc, 0x8eed, 0x1afe, 0xf2df }, 1817 0x1013, 0x3032, 0x4044, 0x8088,
1962 {/*1*/ 0, 0x5d31, 0xa612, 0xfb23, 0x9584, 0xc8b5, 0x3396, 0x6ea7, 1818 0x106b, 0x30d6, 0x70fc, 0xe0a8,
1963 0xeac8, 0xb7f9, 0x4cda, 0x11eb, 0x7f4c, 0x227d, 0xd95e, 0x846f }, 1819 0x4857, 0xc4fe, 0x13cc, 0x3288,
1964 {/*2*/ 0, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 1820 0x1ac5, 0x2f4a, 0x5394, 0xa1e8,
1965 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f }, 1821 0x1f39, 0x251e, 0xbd6c, 0x6bd8,
1966 {/*3*/ 0, 0x2021, 0x3032, 0x1013, 0x4044, 0x6065, 0x7076, 0x5057, 1822 0x15c1, 0x2a42, 0x89ac, 0x4758,
1967 0x8088, 0xa0a9, 0xb0ba, 0x909b, 0xc0cc, 0xe0ed, 0xf0fe, 0xd0df }, 1823 0x2b03, 0x1602, 0x4f0c, 0xca08,
1968 {/*4*/ 0, 0x5041, 0xa082, 0xf0c3, 0x9054, 0xc015, 0x30d6, 0x6097, 1824 0x1f07, 0x3a0e, 0x6b04, 0xbd08,
1969 0xe0a8, 0xb0e9, 0x402a, 0x106b, 0x70fc, 0x20bd, 0xd07e, 0x803f }, 1825 0x8ba7, 0x465e, 0x244c, 0x1cc8,
1970 {/*5*/ 0, 0xbe21, 0xd732, 0x6913, 0x2144, 0x9f65, 0xf676, 0x4857, 1826 0x2b87, 0x164e, 0x642c, 0xdc18,
1971 0x3288, 0x8ca9, 0xe5ba, 0x5b9b, 0x13cc, 0xaded, 0xc4fe, 0x7adf }, 1827 0x40b9, 0x80de, 0x1094, 0x20e8,
1972 {/*6*/ 0, 0x4951, 0x8ea2, 0xc7f3, 0x5394, 0x1ac5, 0xdd36, 0x9467, 1828 0x27db, 0x1eb6, 0x9dac, 0x7b58,
1973 0xa1e8, 0xe8b9, 0x2f4a, 0x661b, 0xf27c, 0xbb2d, 0x7cde, 0x358f }, 1829 0x11c1, 0x2242, 0x84ac, 0x4c58,
1974 {/*7*/ 0, 0x74e1, 0x9872, 0xec93, 0xd6b4, 0xa255, 0x4ec6, 0x3a27, 1830 0x1be5, 0x2d7a, 0x5e34, 0xa718,
1975 0x6bd8, 0x1f39, 0xf3aa, 0x874b, 0xbd6c, 0xc98d, 0x251e, 0x51ff }, 1831 0x4b39, 0x8d1e, 0x14b4, 0x28d8,
1976 {/*8*/ 0, 0x15c1, 0x2a42, 0x3f83, 0xcef4, 0xdb35, 0xe4b6, 0xf177, 1832 0x4c97, 0xc87e, 0x11fc, 0x33a8,
1977 0x4758, 0x5299, 0x6d1a, 0x78db, 0x89ac, 0x9c6d, 0xa3ee, 0xb62f }, 1833 0x8e97, 0x497e, 0x2ffc, 0x1aa8,
1978 {/*9*/ 0, 0x3d01, 0x1602, 0x2b03, 0x8504, 0xb805, 0x9306, 0xae07, 1834 0x16b3, 0x3d62, 0x4f34, 0x8518,
1979 0xca08, 0xf709, 0xdc0a, 0xe10b, 0x4f0c, 0x720d, 0x590e, 0x640f }, 1835 0x1e2f, 0x391a, 0x5cac, 0xf858,
1980 {/*a*/ 0, 0x9801, 0xec02, 0x7403, 0x6b04, 0xf305, 0x8706, 0x1f07, 1836 0x1d9f, 0x3b7a, 0x572c, 0xfe18,
1981 0xbd08, 0x2509, 0x510a, 0xc90b, 0xd60c, 0x4e0d, 0x3a0e, 0xa20f }, 1837 0x15f5, 0x2a5a, 0x5264, 0xa3b8,
1982 {/*b*/ 0, 0xd131, 0x6212, 0xb323, 0x3884, 0xe9b5, 0x5a96, 0x8ba7, 1838 0x1dbb, 0x3b66, 0x715c, 0xe3f8,
1983 0x1cc8, 0xcdf9, 0x7eda, 0xafeb, 0x244c, 0xf57d, 0x465e, 0x976f }, 1839 0x4397, 0xc27e, 0x17fc, 0x3ea8,
1984 {/*c*/ 0, 0xe1d1, 0x7262, 0x93b3, 0xb834, 0x59e5, 0xca56, 0x2b87, 1840 0x1617, 0x3d3e, 0x6464, 0xb8b8,
1985 0xdc18, 0x3dc9, 0xae7a, 0x4fab, 0x542c, 0x85fd, 0x164e, 0xf79f }, 1841 0x23ff, 0x12aa, 0xab6c, 0x56d8,
1986 {/*d*/ 0, 0x6051, 0xb0a2, 0xd0f3, 0x1094, 0x70c5, 0xa036, 0xc067, 1842 0x2dfb, 0x1ba6, 0x913c, 0x7328,
1987 0x20e8, 0x40b9, 0x904a, 0x601b, 0x307c, 0x502d, 0x80de, 0xe08f }, 1843 0x185d, 0x2ca6, 0x7914, 0x9e28,
1988 {/*e*/ 0, 0xa4c1, 0xf842, 0x5c83, 0xe6f4, 0x4235, 0x1eb6, 0xba77, 1844 0x171b, 0x3e36, 0x7d7c, 0xebe8,
1989 0x7b58, 0xdf99, 0x831a, 0x27db, 0x9dac, 0x396d, 0x65ee, 0xc12f }, 1845 0x4199, 0x82ee, 0x19f4, 0x2e58,
1990 {/*f*/ 0, 0x11c1, 0x2242, 0x3383, 0xc8f4, 0xd935, 0xeab6, 0xfb77, 1846 0x4807, 0xc40e, 0x130c, 0x3208,
1991 0x4c58, 0x5d99, 0x6e1a, 0x7fdb, 0x84ac, 0x956d, 0xa6ee, 0xb72f }, 1847 0x1905, 0x2e0a, 0x5804, 0xac08,
1992 1848 0x213f, 0x132a, 0xadfc, 0x5ba8,
1993 /* Channel 1 syndromes */ 1849 0x19a9, 0x2efe, 0xb5cc, 0x6f88,
1994 {/*10*/ 1, 0x45d1, 0x8a62, 0xcfb3, 0x5e34, 0x1be5, 0xd456, 0x9187,
1995 0xa718, 0xe2c9, 0x2d7a, 0x68ab, 0xf92c, 0xbcfd, 0x734e, 0x369f },
1996 {/*11*/ 1, 0x63e1, 0xb172, 0xd293, 0x14b4, 0x7755, 0xa5c6, 0xc627,
1997 0x28d8, 0x4b39, 0x99aa, 0xfa4b, 0x3c6c, 0x5f8d, 0x8d1e, 0xeeff },
1998 {/*12*/ 1, 0xb741, 0xd982, 0x6ec3, 0x2254, 0x9515, 0xfbd6, 0x4c97,
1999 0x33a8, 0x84e9, 0xea2a, 0x5d6b, 0x11fc, 0xa6bd, 0xc87e, 0x7f3f },
2000 {/*13*/ 1, 0xdd41, 0x6682, 0xbbc3, 0x3554, 0xe815, 0x53d6, 0xce97,
2001 0x1aa8, 0xc7e9, 0x7c2a, 0xa1fb, 0x2ffc, 0xf2bd, 0x497e, 0x943f },
2002 {/*14*/ 1, 0x2bd1, 0x3d62, 0x16b3, 0x4f34, 0x64e5, 0x7256, 0x5987,
2003 0x8518, 0xaec9, 0xb87a, 0x93ab, 0xca2c, 0xe1fd, 0xf74e, 0xdc9f },
2004 {/*15*/ 1, 0x83c1, 0xc142, 0x4283, 0xa4f4, 0x2735, 0x65b6, 0xe677,
2005 0xf858, 0x7b99, 0x391a, 0xbadb, 0x5cac, 0xdf6d, 0x9dee, 0x1e2f },
2006 {/*16*/ 1, 0x8fd1, 0xc562, 0x4ab3, 0xa934, 0x26e5, 0x6c56, 0xe387,
2007 0xfe18, 0x71c9, 0x3b7a, 0xb4ab, 0x572c, 0xd8fd, 0x924e, 0x1d9f },
2008 {/*17*/ 1, 0x4791, 0x89e2, 0xce73, 0x5264, 0x15f5, 0xdb86, 0x9c17,
2009 0xa3b8, 0xe429, 0x2a5a, 0x6dcb, 0xf1dc, 0xb64d, 0x783e, 0x3faf },
2010 {/*18*/ 1, 0x5781, 0xa9c2, 0xfe43, 0x92a4, 0xc525, 0x3b66, 0x6ce7,
2011 0xe3f8, 0xb479, 0x4a3a, 0x1dbb, 0x715c, 0x26dd, 0xd89e, 0x8f1f },
2012 {/*19*/ 1, 0xbf41, 0xd582, 0x6ac3, 0x2954, 0x9615, 0xfcd6, 0x4397,
2013 0x3ea8, 0x81e9, 0xeb2a, 0x546b, 0x17fc, 0xa8bd, 0xc27e, 0x7d3f },
2014 {/*1a*/ 1, 0x9891, 0xe1e2, 0x7273, 0x6464, 0xf7f5, 0x8586, 0x1617,
2015 0xb8b8, 0x2b29, 0x595a, 0xcacb, 0xdcdc, 0x4f4d, 0x3d3e, 0xaeaf },
2016 {/*1b*/ 1, 0xcce1, 0x4472, 0x8893, 0xfdb4, 0x3f55, 0xb9c6, 0x7527,
2017 0x56d8, 0x9a39, 0x12aa, 0xde4b, 0xab6c, 0x678d, 0xef1e, 0x23ff },
2018 {/*1c*/ 1, 0xa761, 0xf9b2, 0x5ed3, 0xe214, 0x4575, 0x1ba6, 0xbcc7,
2019 0x7328, 0xd449, 0x8a9a, 0x2dfb, 0x913c, 0x365d, 0x688e, 0xcfef },
2020 {/*1d*/ 1, 0xff61, 0x55b2, 0xaad3, 0x7914, 0x8675, 0x2ca6, 0xd3c7,
2021 0x9e28, 0x6149, 0xcb9a, 0x34fb, 0xe73c, 0x185d, 0xb28e, 0x4def },
2022 {/*1e*/ 1, 0x5451, 0xa8a2, 0xfcf3, 0x9694, 0xc2c5, 0x3e36, 0x6a67,
2023 0xebe8, 0xbfb9, 0x434a, 0x171b, 0x7d7c, 0x292d, 0xd5de, 0x818f },
2024 {/*1f*/ 1, 0x6fc1, 0xb542, 0xda83, 0x19f4, 0x7635, 0xacb6, 0xc377,
2025 0x2e58, 0x4199, 0x9b1a, 0xf4db, 0x37ac, 0x586d, 0x82ee, 0xed2f },
2026
2027 /* ECC bits are also in the set of tokens and they too can go bad
2028 * the first 2 cover channel 0, while the second 2 cover channel 1
2029 */
2030 {/*20*/ 0, 0xbe01, 0xd702, 0x6903, 0x2104, 0x9f05, 0xf606, 0x4807,
2031 0x3208, 0x8c09, 0xe50a, 0x5b0b, 0x130c, 0xad0d, 0xc40e, 0x7a0f },
2032 {/*21*/ 0, 0x4101, 0x8202, 0xc303, 0x5804, 0x1905, 0xda06, 0x9b07,
2033 0xac08, 0xed09, 0x2e0a, 0x6f0b, 0x640c, 0xb50d, 0x760e, 0x370f },
2034 {/*22*/ 1, 0xc441, 0x4882, 0x8cc3, 0xf654, 0x3215, 0xbed6, 0x7a97,
2035 0x5ba8, 0x9fe9, 0x132a, 0xd76b, 0xadfc, 0x69bd, 0xe57e, 0x213f },
2036 {/*23*/ 1, 0x7621, 0x9b32, 0xed13, 0xda44, 0xac65, 0x4176, 0x3757,
2037 0x6f88, 0x19a9, 0xf4ba, 0x829b, 0xb5cc, 0xc3ed, 0x2efe, 0x58df }
2038}; 1850};
2039 1851
2040/* 1852static u16 x8_vectors[] = {
2041 * Given the syndrome argument, scan each of the channel tables for a syndrome 1853 0x0145, 0x028a, 0x2374, 0x43c8, 0xa1f0, 0x0520, 0x0a40, 0x1480,
2042 * match. Depending on which table it is found in, return the channel number. 1854 0x0211, 0x0422, 0x0844, 0x1088, 0x01b0, 0x44e0, 0x23c0, 0xed80,
2043 */ 1855 0x1011, 0x0116, 0x022c, 0x0458, 0x08b0, 0x8c60, 0x2740, 0x4e80,
2044static int get_channel_from_ecc_syndrome(unsigned short syndrome) 1856 0x0411, 0x0822, 0x1044, 0x0158, 0x02b0, 0x2360, 0x46c0, 0xab80,
1857 0x0811, 0x1022, 0x012c, 0x0258, 0x04b0, 0x4660, 0x8cc0, 0x2780,
1858 0x2071, 0x40e2, 0xa0c4, 0x0108, 0x0210, 0x0420, 0x0840, 0x1080,
1859 0x4071, 0x80e2, 0x0104, 0x0208, 0x0410, 0x0820, 0x1040, 0x2080,
1860 0x8071, 0x0102, 0x0204, 0x0408, 0x0810, 0x1020, 0x2040, 0x4080,
1861 0x019d, 0x03d6, 0x136c, 0x2198, 0x50b0, 0xb2e0, 0x0740, 0x0e80,
1862 0x0189, 0x03ea, 0x072c, 0x0e58, 0x1cb0, 0x56e0, 0x37c0, 0xf580,
1863 0x01fd, 0x0376, 0x06ec, 0x0bb8, 0x1110, 0x2220, 0x4440, 0x8880,
1864 0x0163, 0x02c6, 0x1104, 0x0758, 0x0eb0, 0x2be0, 0x6140, 0xc280,
1865 0x02fd, 0x01c6, 0x0b5c, 0x1108, 0x07b0, 0x25a0, 0x8840, 0x6180,
1866 0x0801, 0x012e, 0x025c, 0x04b8, 0x1370, 0x26e0, 0x57c0, 0xb580,
1867 0x0401, 0x0802, 0x015c, 0x02b8, 0x22b0, 0x13e0, 0x7140, 0xe280,
1868 0x0201, 0x0402, 0x0804, 0x01b8, 0x11b0, 0x31a0, 0x8040, 0x7180,
1869 0x0101, 0x0202, 0x0404, 0x0808, 0x1010, 0x2020, 0x4040, 0x8080,
1870 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080,
1871 0x0100, 0x0200, 0x0400, 0x0800, 0x1000, 0x2000, 0x4000, 0x8000,
1872};
1873
1874static int decode_syndrome(u16 syndrome, u16 *vectors, int num_vecs,
1875 int v_dim)
2045{ 1876{
2046 int row; 1877 unsigned int i, err_sym;
2047 int column; 1878
1879 for (err_sym = 0; err_sym < num_vecs / v_dim; err_sym++) {
1880 u16 s = syndrome;
1881 int v_idx = err_sym * v_dim;
1882 int v_end = (err_sym + 1) * v_dim;
1883
1884 /* walk over all 16 bits of the syndrome */
1885 for (i = 1; i < (1U << 16); i <<= 1) {
2048 1886
2049 /* Determine column to scan */ 1887 /* if bit is set in that eigenvector... */
2050 column = syndrome & 0xF; 1888 if (v_idx < v_end && vectors[v_idx] & i) {
1889 u16 ev_comp = vectors[v_idx++];
2051 1890
2052 /* Scan all rows, looking for syndrome, or end of table */ 1891 /* ... and bit set in the modified syndrome, */
2053 for (row = 0; row < NUMBER_ECC_ROWS; row++) { 1892 if (s & i) {
2054 if (ecc_chipkill_syndromes[row][column] == syndrome) 1893 /* remove it. */
2055 return ecc_chipkill_syndromes[row][0]; 1894 s ^= ev_comp;
1895
1896 if (!s)
1897 return err_sym;
1898 }
1899
1900 } else if (s & i)
1901 /* can't get to zero, move to next symbol */
1902 break;
1903 }
2056 } 1904 }
2057 1905
2058 debugf0("syndrome(%x) not found\n", syndrome); 1906 debugf0("syndrome(%x) not found\n", syndrome);
2059 return -1; 1907 return -1;
2060} 1908}
2061 1909
1910static int map_err_sym_to_channel(int err_sym, int sym_size)
1911{
1912 if (sym_size == 4)
1913 switch (err_sym) {
1914 case 0x20:
1915 case 0x21:
1916 return 0;
1917 break;
1918 case 0x22:
1919 case 0x23:
1920 return 1;
1921 break;
1922 default:
1923 return err_sym >> 4;
1924 break;
1925 }
1926 /* x8 symbols */
1927 else
1928 switch (err_sym) {
1929 /* imaginary bits not in a DIMM */
1930 case 0x10:
1931 WARN(1, KERN_ERR "Invalid error symbol: 0x%x\n",
1932 err_sym);
1933 return -1;
1934 break;
1935
1936 case 0x11:
1937 return 0;
1938 break;
1939 case 0x12:
1940 return 1;
1941 break;
1942 default:
1943 return err_sym >> 3;
1944 break;
1945 }
1946 return -1;
1947}
1948
1949static int get_channel_from_ecc_syndrome(struct mem_ctl_info *mci, u16 syndrome)
1950{
1951 struct amd64_pvt *pvt = mci->pvt_info;
1952 u32 value = 0;
1953 int err_sym = 0;
1954
1955 amd64_read_pci_cfg(pvt->misc_f3_ctl, 0x180, &value);
1956
1957 /* F3x180[EccSymbolSize]=1, x8 symbols */
1958 if (boot_cpu_data.x86 == 0x10 &&
1959 boot_cpu_data.x86_model > 7 &&
1960 value & BIT(25)) {
1961 err_sym = decode_syndrome(syndrome, x8_vectors,
1962 ARRAY_SIZE(x8_vectors), 8);
1963 return map_err_sym_to_channel(err_sym, 8);
1964 } else {
1965 err_sym = decode_syndrome(syndrome, x4_vectors,
1966 ARRAY_SIZE(x4_vectors), 4);
1967 return map_err_sym_to_channel(err_sym, 4);
1968 }
1969}
1970
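The new decoder can be cross-checked against the syndrome table this patch deletes: 0xa255 sits in row 7 of the old ecc_chipkill_syndromes[] (token 7, channel 0), and it is exactly x4_vectors[28] ^ x4_vectors[30] (0x1f39 ^ 0xbd6c), i.e. a two-bit error inside symbol 7. A hypothetical self-test, not part of the patch:

/* hypothetical sanity check: a correctable x4 syndrome must decode
 * to the symbol whose eigenvectors span it */
static void __init decode_syndrome_selftest(void)
{
	/* 0x1f39 ^ 0xbd6c == 0xa255: token 7, channel 0 in the old table */
	int err_sym = decode_syndrome(0xa255, x4_vectors,
				      ARRAY_SIZE(x4_vectors), 4);

	WARN_ON(err_sym != 7);

	/* x4 symbols 0x00-0x0f map to channel 0, 0x10-0x1f to channel 1 */
	WARN_ON(map_err_sym_to_channel(err_sym, 4) != 0);
}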
2062/* 1971/*
2063 * Check for valid error in the NB Status High register. If so, proceed to read 1972 * Check for valid error in the NB Status High register. If so, proceed to read
2064 * NB Status Low, NB Address Low and NB Address High registers and store data 1973 * NB Status Low, NB Address Low and NB Address High registers and store data
@@ -2073,40 +1982,24 @@ static int amd64_get_error_info_regs(struct mem_ctl_info *mci,
2073{ 1982{
2074 struct amd64_pvt *pvt; 1983 struct amd64_pvt *pvt;
2075 struct pci_dev *misc_f3_ctl; 1984 struct pci_dev *misc_f3_ctl;
2076 int err = 0;
2077 1985
2078 pvt = mci->pvt_info; 1986 pvt = mci->pvt_info;
2079 misc_f3_ctl = pvt->misc_f3_ctl; 1987 misc_f3_ctl = pvt->misc_f3_ctl;
2080 1988
2081 err = pci_read_config_dword(misc_f3_ctl, K8_NBSH, &regs->nbsh); 1989 if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSH, &regs->nbsh))
2082 if (err) 1990 return 0;
2083 goto err_reg;
2084 1991
2085 if (!(regs->nbsh & K8_NBSH_VALID_BIT)) 1992 if (!(regs->nbsh & K8_NBSH_VALID_BIT))
2086 return 0; 1993 return 0;
2087 1994
2088 /* valid error, read remaining error information registers */ 1995 /* valid error, read remaining error information registers */
2089 err = pci_read_config_dword(misc_f3_ctl, K8_NBSL, &regs->nbsl); 1996 if (amd64_read_pci_cfg(misc_f3_ctl, K8_NBSL, &regs->nbsl) ||
2090 if (err) 1997 amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAL, &regs->nbeal) ||
2091 goto err_reg; 1998 amd64_read_pci_cfg(misc_f3_ctl, K8_NBEAH, &regs->nbeah) ||
2092 1999 amd64_read_pci_cfg(misc_f3_ctl, K8_NBCFG, &regs->nbcfg))
2093 err = pci_read_config_dword(misc_f3_ctl, K8_NBEAL, &regs->nbeal); 2000 return 0;
2094 if (err)
2095 goto err_reg;
2096
2097 err = pci_read_config_dword(misc_f3_ctl, K8_NBEAH, &regs->nbeah);
2098 if (err)
2099 goto err_reg;
2100
2101 err = pci_read_config_dword(misc_f3_ctl, K8_NBCFG, &regs->nbcfg);
2102 if (err)
2103 goto err_reg;
2104 2001
2105 return 1; 2002 return 1;
2106
2107err_reg:
2108 debugf0("Reading error info register failed\n");
2109 return 0;
2110} 2003}
2111 2004
2112/* 2005/*
@@ -2184,7 +2077,7 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
2184 struct err_regs *info) 2077 struct err_regs *info)
2185{ 2078{
2186 struct amd64_pvt *pvt = mci->pvt_info; 2079 struct amd64_pvt *pvt = mci->pvt_info;
2187 u64 SystemAddress; 2080 u64 sys_addr;
2188 2081
2189 /* Ensure that the Error Address is VALID */ 2082 /* Ensure that the Error Address is VALID */
2190 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) { 2083 if ((info->nbsh & K8_NBSH_VALID_ERROR_ADDR) == 0) {
@@ -2194,22 +2087,23 @@ static void amd64_handle_ce(struct mem_ctl_info *mci,
2194 return; 2087 return;
2195 } 2088 }
2196 2089
2197 SystemAddress = extract_error_address(mci, info); 2090 sys_addr = pvt->ops->get_error_address(mci, info);
2198 2091
2199 amd64_mc_printk(mci, KERN_ERR, 2092 amd64_mc_printk(mci, KERN_ERR,
2200 "CE ERROR_ADDRESS= 0x%llx\n", SystemAddress); 2093 "CE ERROR_ADDRESS= 0x%llx\n", sys_addr);
2201 2094
2202 pvt->ops->map_sysaddr_to_csrow(mci, info, SystemAddress); 2095 pvt->ops->map_sysaddr_to_csrow(mci, info, sys_addr);
2203} 2096}
2204 2097
2205/* Handle any Uncorrectable Errors (UEs) */ 2098/* Handle any Uncorrectable Errors (UEs) */
2206static void amd64_handle_ue(struct mem_ctl_info *mci, 2099static void amd64_handle_ue(struct mem_ctl_info *mci,
2207 struct err_regs *info) 2100 struct err_regs *info)
2208{ 2101{
2102 struct amd64_pvt *pvt = mci->pvt_info;
2103 struct mem_ctl_info *log_mci, *src_mci = NULL;
2209 int csrow; 2104 int csrow;
2210 u64 SystemAddress; 2105 u64 sys_addr;
2211 u32 page, offset; 2106 u32 page, offset;
2212 struct mem_ctl_info *log_mci, *src_mci = NULL;
2213 2107
2214 log_mci = mci; 2108 log_mci = mci;
2215 2109
@@ -2220,31 +2114,31 @@ static void amd64_handle_ue(struct mem_ctl_info *mci,
2220 return; 2114 return;
2221 } 2115 }
2222 2116
2223 SystemAddress = extract_error_address(mci, info); 2117 sys_addr = pvt->ops->get_error_address(mci, info);
2224 2118
2225 /* 2119 /*
2226 * Find out which node the error address belongs to. This may be 2120 * Find out which node the error address belongs to. This may be
2227 * different from the node that detected the error. 2121 * different from the node that detected the error.
2228 */ 2122 */
2229 src_mci = find_mc_by_sys_addr(mci, SystemAddress); 2123 src_mci = find_mc_by_sys_addr(mci, sys_addr);
2230 if (!src_mci) { 2124 if (!src_mci) {
2231 amd64_mc_printk(mci, KERN_CRIT, 2125 amd64_mc_printk(mci, KERN_CRIT,
2232 "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n", 2126 "ERROR ADDRESS (0x%lx) value NOT mapped to a MC\n",
2233 (unsigned long)SystemAddress); 2127 (unsigned long)sys_addr);
2234 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 2128 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2235 return; 2129 return;
2236 } 2130 }
2237 2131
2238 log_mci = src_mci; 2132 log_mci = src_mci;
2239 2133
2240 csrow = sys_addr_to_csrow(log_mci, SystemAddress); 2134 csrow = sys_addr_to_csrow(log_mci, sys_addr);
2241 if (csrow < 0) { 2135 if (csrow < 0) {
2242 amd64_mc_printk(mci, KERN_CRIT, 2136 amd64_mc_printk(mci, KERN_CRIT,
2243 "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n", 2137 "ERROR_ADDRESS (0x%lx) value NOT mapped to 'csrow'\n",
2244 (unsigned long)SystemAddress); 2138 (unsigned long)sys_addr);
2245 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 2139 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR);
2246 } else { 2140 } else {
2247 error_address_to_page_and_offset(SystemAddress, &page, &offset); 2141 error_address_to_page_and_offset(sys_addr, &page, &offset);
2248 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); 2142 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR);
2249 } 2143 }
2250} 2144}
@@ -2384,30 +2278,26 @@ static void amd64_free_mc_sibling_devices(struct amd64_pvt *pvt)
2384static void amd64_read_mc_registers(struct amd64_pvt *pvt) 2278static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2385{ 2279{
2386 u64 msr_val; 2280 u64 msr_val;
2387 int dram, err = 0; 2281 int dram;
2388 2282
2389 /* 2283 /*
2390 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since 2284 * Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
2391 * those are Read-As-Zero 2285 * those are Read-As-Zero
2392 */ 2286 */
2393 rdmsrl(MSR_K8_TOP_MEM1, msr_val); 2287 rdmsrl(MSR_K8_TOP_MEM1, pvt->top_mem);
2394 pvt->top_mem = msr_val >> 23; 2288 debugf0(" TOP_MEM: 0x%016llx\n", pvt->top_mem);
2395 debugf0(" TOP_MEM=0x%08llx\n", pvt->top_mem);
2396 2289
2397 /* check first whether TOP_MEM2 is enabled */ 2290 /* check first whether TOP_MEM2 is enabled */
2398 rdmsrl(MSR_K8_SYSCFG, msr_val); 2291 rdmsrl(MSR_K8_SYSCFG, msr_val);
2399 if (msr_val & (1U << 21)) { 2292 if (msr_val & (1U << 21)) {
2400 rdmsrl(MSR_K8_TOP_MEM2, msr_val); 2293 rdmsrl(MSR_K8_TOP_MEM2, pvt->top_mem2);
2401 pvt->top_mem2 = msr_val >> 23; 2294 debugf0(" TOP_MEM2: 0x%016llx\n", pvt->top_mem2);
2402 debugf0(" TOP_MEM2=0x%08llx\n", pvt->top_mem2);
2403 } else 2295 } else
2404 debugf0(" TOP_MEM2 disabled.\n"); 2296 debugf0(" TOP_MEM2 disabled.\n");
2405 2297
2406 amd64_cpu_display_info(pvt); 2298 amd64_cpu_display_info(pvt);
2407 2299
2408 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap); 2300 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCAP, &pvt->nbcap);
2409 if (err)
2410 goto err_reg;
2411 2301
2412 if (pvt->ops->read_dram_ctl_register) 2302 if (pvt->ops->read_dram_ctl_register)
2413 pvt->ops->read_dram_ctl_register(pvt); 2303 pvt->ops->read_dram_ctl_register(pvt);
@@ -2425,13 +2315,12 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2425 * debug output block away. 2315 * debug output block away.
2426 */ 2316 */
2427 if (pvt->dram_rw_en[dram] != 0) { 2317 if (pvt->dram_rw_en[dram] != 0) {
2428 debugf1(" DRAM_BASE[%d]: 0x%8.08x-%8.08x " 2318 debugf1(" DRAM-BASE[%d]: 0x%016llx "
2429 "DRAM_LIMIT: 0x%8.08x-%8.08x\n", 2319 "DRAM-LIMIT: 0x%016llx\n",
2430 dram, 2320 dram,
2431 (u32)(pvt->dram_base[dram] >> 32), 2321 pvt->dram_base[dram],
2432 (u32)(pvt->dram_base[dram] & 0xFFFFFFFF), 2322 pvt->dram_limit[dram]);
2433 (u32)(pvt->dram_limit[dram] >> 32), 2323
2434 (u32)(pvt->dram_limit[dram] & 0xFFFFFFFF));
2435 debugf1(" IntlvEn=%s %s %s " 2324 debugf1(" IntlvEn=%s %s %s "
2436 "IntlvSel=%d DstNode=%d\n", 2325 "IntlvSel=%d DstNode=%d\n",
2437 pvt->dram_IntlvEn[dram] ? 2326 pvt->dram_IntlvEn[dram] ?
@@ -2445,44 +2334,20 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt)
2445 2334
2446 amd64_read_dct_base_mask(pvt); 2335 amd64_read_dct_base_mask(pvt);
2447 2336
2448 err = pci_read_config_dword(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar); 2337 amd64_read_pci_cfg(pvt->addr_f1_ctl, K8_DHAR, &pvt->dhar);
2449 if (err)
2450 goto err_reg;
2451
2452 amd64_read_dbam_reg(pvt); 2338 amd64_read_dbam_reg(pvt);
2453 2339
2454 err = pci_read_config_dword(pvt->misc_f3_ctl, 2340 amd64_read_pci_cfg(pvt->misc_f3_ctl,
2455 F10_ONLINE_SPARE, &pvt->online_spare); 2341 F10_ONLINE_SPARE, &pvt->online_spare);
2456 if (err)
2457 goto err_reg;
2458
2459 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2460 if (err)
2461 goto err_reg;
2462 2342
2463 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0); 2343 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_0, &pvt->dclr0);
2464 if (err) 2344 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_0, &pvt->dchr0);
2465 goto err_reg;
2466 2345
2467 if (!dct_ganging_enabled(pvt)) { 2346 if (!dct_ganging_enabled(pvt)) {
2468 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCLR_1, 2347 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCLR_1, &pvt->dclr1);
2469 &pvt->dclr1); 2348 amd64_read_pci_cfg(pvt->dram_f2_ctl, F10_DCHR_1, &pvt->dchr1);
2470 if (err)
2471 goto err_reg;
2472
2473 err = pci_read_config_dword(pvt->dram_f2_ctl, F10_DCHR_1,
2474 &pvt->dchr1);
2475 if (err)
2476 goto err_reg;
2477 } 2349 }
2478
2479 amd64_dump_misc_regs(pvt); 2350 amd64_dump_misc_regs(pvt);
2480
2481 return;
2482
2483err_reg:
2484 debugf0("Reading an MC register failed\n");
2485
2486} 2351}
2487 2352
2488/* 2353/*
@@ -2521,7 +2386,7 @@ err_reg:
2521 */ 2386 */
2522static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt) 2387static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2523{ 2388{
2524 u32 dram_map, nr_pages; 2389 u32 cs_mode, nr_pages;
2525 2390
2526 /* 2391 /*
2527 * The math on this doesn't look right on the surface because x/2*4 can 2392 * The math on this doesn't look right on the surface because x/2*4 can
@@ -2530,9 +2395,9 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2530 * number of bits to shift the DBAM register to extract the proper CSROW 2395 * number of bits to shift the DBAM register to extract the proper CSROW
2531 * field. 2396 * field.
2532 */ 2397 */
2533 dram_map = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF; 2398 cs_mode = (pvt->dbam0 >> ((csrow_nr / 2) * 4)) & 0xF;
2534 2399
2535 nr_pages = pvt->ops->dbam_map_to_pages(pvt, dram_map); 2400 nr_pages = pvt->ops->dbam_to_cs(pvt, cs_mode) << (20 - PAGE_SHIFT);
2536 2401
2537 /* 2402 /*
2538 * If dual channel then double the memory size of single channel. 2403 * If dual channel then double the memory size of single channel.
@@ -2540,7 +2405,7 @@ static u32 amd64_csrow_nr_pages(int csrow_nr, struct amd64_pvt *pvt)
2540 */ 2405 */
2541 nr_pages <<= (pvt->channel_count - 1); 2406 nr_pages <<= (pvt->channel_count - 1);
2542 2407
2543 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, dram_map); 2408 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2544 debugf0(" nr_pages= %u channel-count = %d\n", 2409 debugf0(" nr_pages= %u channel-count = %d\n",
2545 nr_pages, pvt->channel_count); 2410 nr_pages, pvt->channel_count);
2546 2411
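A worked example of the conversion, assuming 4 KiB pages (PAGE_SHIFT == 12); the csrow number and the 512 MB lookup result are chosen purely for illustration:

/* worked example (assumes PAGE_SHIFT == 12, i.e. 4 KiB pages):
 *   csrow_nr = 5  ->  nibble offset (5/2)*4 = 8
 *   cs_mode  = (dbam0 >> 8) & 0xF
 *   dbam_to_cs() -> 512 MB  ->  512 << (20 - 12) = 131072 pages
 *   dual channel -> nr_pages <<= 1 -> 262144 pages (1 GB total)
 */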
@@ -2556,13 +2421,11 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
2556 struct csrow_info *csrow; 2421 struct csrow_info *csrow;
2557 struct amd64_pvt *pvt; 2422 struct amd64_pvt *pvt;
2558 u64 input_addr_min, input_addr_max, sys_addr; 2423 u64 input_addr_min, input_addr_max, sys_addr;
2559 int i, err = 0, empty = 1; 2424 int i, empty = 1;
2560 2425
2561 pvt = mci->pvt_info; 2426 pvt = mci->pvt_info;
2562 2427
2563 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg); 2428 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &pvt->nbcfg);
2564 if (err)
2565 debugf0("Reading K8_NBCFG failed\n");
2566 2429
2567 debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg, 2430 debugf0("NBCFG= 0x%x CHIPKILL= %s DRAM ECC= %s\n", pvt->nbcfg,
2568 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", 2431 (pvt->nbcfg & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2618,6 +2481,109 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
2618 return empty; 2481 return empty;
2619} 2482}
2620 2483
2484/* get all cores on this DCT */
2485static void get_cpus_on_this_dct_cpumask(struct cpumask *mask, int nid)
2486{
2487 int cpu;
2488
2489 for_each_online_cpu(cpu)
2490 if (amd_get_nb_id(cpu) == nid)
2491 cpumask_set_cpu(cpu, mask);
2492}
2493
2494/* check MCG_CTL on all the cpus on this node */
2495static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2496{
2497 cpumask_var_t mask;
2498 struct msr *msrs;
2499 int cpu, nbe, idx = 0;
2500 bool ret = false;
2501
2502 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) {
2503 amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
2504 __func__);
2505 return false;
2506 }
2507
2508 get_cpus_on_this_dct_cpumask(mask, nid);
2509
2510 msrs = kzalloc(sizeof(struct msr) * cpumask_weight(mask), GFP_KERNEL);
2511 if (!msrs) {
2512 amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
2513 __func__);
2514 free_cpumask_var(mask);
2515 return false;
2516 }
2517
2518 rdmsr_on_cpus(mask, MSR_IA32_MCG_CTL, msrs);
2519
2520 for_each_cpu(cpu, mask) {
2521 nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
2522
2523 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2524 cpu, msrs[idx].q,
2525 (nbe ? "enabled" : "disabled"));
2526
2527 if (!nbe)
2528 goto out;
2529
2530 idx++;
2531 }
2532 ret = true;
2533
2534out:
2535 kfree(msrs);
2536 free_cpumask_var(mask);
2537 return ret;
2538}
2539
2540static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
2541{
2542 cpumask_var_t cmask;
2543 struct msr *msrs = NULL;
2544 int cpu, idx = 0;
2545
2546 if (!zalloc_cpumask_var(&cmask, GFP_KERNEL)) {
2547 amd64_printk(KERN_WARNING, "%s: error allocating mask\n",
2548 __func__);
2549 return false;
2550 }
2551
2552 get_cpus_on_this_dct_cpumask(cmask, pvt->mc_node_id);
2553
2554 msrs = kzalloc(sizeof(struct msr) * cpumask_weight(cmask), GFP_KERNEL);
2555 if (!msrs) {
2556 amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
2557 __func__);
2558 return -ENOMEM;
2559 }
2560
2561 rdmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2562
2563 for_each_cpu(cpu, cmask) {
2564
2565 if (on) {
2566 if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
2567 pvt->flags.ecc_report = 1;
2568
2569 msrs[idx].l |= K8_MSR_MCGCTL_NBE;
2570 } else {
2571 /*
2572 * Turn off ECC reporting only when it was off before
2573 */
2574 if (!pvt->flags.ecc_report)
2575 msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
2576 }
2577 idx++;
2578 }
2579 wrmsr_on_cpus(cmask, MSR_IA32_MCG_CTL, msrs);
2580
2581 kfree(msrs);
2582 free_cpumask_var(cmask);
2583
2584 return 0;
2585}
2586
2621/* 2587/*
2622 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we" 2588 * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
2623 * enable it. 2589 * enable it.
@@ -2625,24 +2591,16 @@ static int amd64_init_csrows(struct mem_ctl_info *mci)
2625static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci) 2591static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2626{ 2592{
2627 struct amd64_pvt *pvt = mci->pvt_info; 2593 struct amd64_pvt *pvt = mci->pvt_info;
2628 const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); 2594 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2629 int cpu, idx = 0, err = 0;
2630 struct msr msrs[cpumask_weight(cpumask)];
2631 u32 value;
2632 u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2633 2595
2634 if (!ecc_enable_override) 2596 if (!ecc_enable_override)
2635 return; 2597 return;
2636 2598
2637 memset(msrs, 0, sizeof(msrs));
2638
2639 amd64_printk(KERN_WARNING, 2599 amd64_printk(KERN_WARNING,
2640 "'ecc_enable_override' parameter is active, " 2600 "'ecc_enable_override' parameter is active, "
2641 "Enabling AMD ECC hardware now: CAUTION\n"); 2601 "Enabling AMD ECC hardware now: CAUTION\n");
2642 2602
2643 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value); 2603 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2644 if (err)
2645 debugf0("Reading K8_NBCTL failed\n");
2646 2604
2647 /* turn on UECCn and CECCEn bits */ 2605 /* turn on UECCn and CECCEn bits */
2648 pvt->old_nbctl = value & mask; 2606 pvt->old_nbctl = value & mask;
@@ -2651,20 +2609,11 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2651 value |= mask; 2609 value |= mask;
2652 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); 2610 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2653 2611
2654 rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); 2612 if (amd64_toggle_ecc_err_reporting(pvt, ON))
2655 2613 amd64_printk(KERN_WARNING, "Error enabling ECC reporting over "
2656 for_each_cpu(cpu, cpumask) { 2614 "MCGCTL!\n");
2657 if (msrs[idx].l & K8_MSR_MCGCTL_NBE)
2658 set_bit(idx, &pvt->old_mcgctl);
2659 2615
2660 msrs[idx].l |= K8_MSR_MCGCTL_NBE; 2616 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2661 idx++;
2662 }
2663 wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2664
2665 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
2666 if (err)
2667 debugf0("Reading K8_NBCFG failed\n");
2668 2617
2669 debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value, 2618 debugf0("NBCFG(1)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
2670 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled", 2619 (value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
@@ -2679,9 +2628,7 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2679 value |= K8_NBCFG_ECC_ENABLE; 2628 value |= K8_NBCFG_ECC_ENABLE;
2680 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value); 2629 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
2681 2630
2682 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); 2631 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2683 if (err)
2684 debugf0("Reading K8_NBCFG failed\n");
2685 2632
2686 if (!(value & K8_NBCFG_ECC_ENABLE)) { 2633 if (!(value & K8_NBCFG_ECC_ENABLE)) {
2687 amd64_printk(KERN_WARNING, 2634 amd64_printk(KERN_WARNING,
@@ -2701,86 +2648,21 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
2701 2648
2702static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt) 2649static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
2703{ 2650{
2704 const cpumask_t *cpumask = cpumask_of_node(pvt->mc_node_id); 2651 u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2705 int cpu, idx = 0, err = 0;
2706 struct msr msrs[cpumask_weight(cpumask)];
2707 u32 value;
2708 u32 mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
2709 2652
2710 if (!pvt->nbctl_mcgctl_saved) 2653 if (!pvt->nbctl_mcgctl_saved)
2711 return; 2654 return;
2712 2655
2713 memset(msrs, 0, sizeof(msrs)); 2656 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
2714
2715 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCTL, &value);
2716 if (err)
2717 debugf0("Reading K8_NBCTL failed\n");
2718 value &= ~mask; 2657 value &= ~mask;
2719 value |= pvt->old_nbctl; 2658 value |= pvt->old_nbctl;
2720 2659
2721 /* restore the NB Enable MCGCTL bit */ 2660 /* restore the NB Enable MCGCTL bit */
2722 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value); 2661 pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
2723 2662
2724 rdmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs); 2663 if (amd64_toggle_ecc_err_reporting(pvt, OFF))
2725 2664 amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
2726 for_each_cpu(cpu, cpumask) { 2665 "MCGCTL!\n");
2727 msrs[idx].l &= ~K8_MSR_MCGCTL_NBE;
2728 msrs[idx].l |=
2729 test_bit(idx, &pvt->old_mcgctl) << K8_MSR_MCGCTL_NBE;
2730 idx++;
2731 }
2732
2733 wrmsr_on_cpus(cpumask, K8_MSR_MCGCTL, msrs);
2734}
2735
2736/* get all cores on this DCT */
2737static void get_cpus_on_this_dct_cpumask(cpumask_t *mask, int nid)
2738{
2739 int cpu;
2740
2741 for_each_online_cpu(cpu)
2742 if (amd_get_nb_id(cpu) == nid)
2743 cpumask_set_cpu(cpu, mask);
2744}
2745
2746/* check MCG_CTL on all the cpus on this node */
2747static bool amd64_nb_mce_bank_enabled_on_node(int nid)
2748{
2749 cpumask_t mask;
2750 struct msr *msrs;
2751 int cpu, nbe, idx = 0;
2752 bool ret = false;
2753
2754 cpumask_clear(&mask);
2755
2756 get_cpus_on_this_dct_cpumask(&mask, nid);
2757
2758 msrs = kzalloc(sizeof(struct msr) * cpumask_weight(&mask), GFP_KERNEL);
2759 if (!msrs) {
2760 amd64_printk(KERN_WARNING, "%s: error allocating msrs\n",
2761 __func__);
2762 return false;
2763 }
2764
2765 rdmsr_on_cpus(&mask, MSR_IA32_MCG_CTL, msrs);
2766
2767 for_each_cpu(cpu, &mask) {
2768 nbe = msrs[idx].l & K8_MSR_MCGCTL_NBE;
2769
2770 debugf0("core: %u, MCG_CTL: 0x%llx, NB MSR is %s\n",
2771 cpu, msrs[idx].q,
2772 (nbe ? "enabled" : "disabled"));
2773
2774 if (!nbe)
2775 goto out;
2776
2777 idx++;
2778 }
2779 ret = true;
2780
2781out:
2782 kfree(msrs);
2783 return ret;
2784} 2666}
2785 2667
2786/* 2668/*
@@ -2797,13 +2679,10 @@ static const char *ecc_warning =
2797static int amd64_check_ecc_enabled(struct amd64_pvt *pvt) 2679static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
2798{ 2680{
2799 u32 value; 2681 u32 value;
2800 int err = 0;
2801 u8 ecc_enabled = 0; 2682 u8 ecc_enabled = 0;
2802 bool nb_mce_en = false; 2683 bool nb_mce_en = false;
2803 2684
2804 err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value); 2685 amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
2805 if (err)
2806 debugf0("Reading K8_NBCTL failed\n");
2807 2686
2808 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE); 2687 ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
2809 if (!ecc_enabled) 2688 if (!ecc_enabled)
@@ -2909,7 +2788,6 @@ static int amd64_probe_one_instance(struct pci_dev *dram_f2_ctl,
2909 pvt->ext_model = boot_cpu_data.x86_model >> 4; 2788 pvt->ext_model = boot_cpu_data.x86_model >> 4;
2910 pvt->mc_type_index = mc_type_index; 2789 pvt->mc_type_index = mc_type_index;
2911 pvt->ops = family_ops(mc_type_index); 2790 pvt->ops = family_ops(mc_type_index);
2912 pvt->old_mcgctl = 0;
2913 2791
2914 /* 2792 /*
2915 * We have the dram_f2_ctl device as an argument, now go reserve its 2793 * We have the dram_f2_ctl device as an argument, now go reserve its
@@ -2959,17 +2837,10 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt)
2959{ 2837{
2960 int node_id = pvt->mc_node_id; 2838 int node_id = pvt->mc_node_id;
2961 struct mem_ctl_info *mci; 2839 struct mem_ctl_info *mci;
2962 int ret, err = 0; 2840 int ret = -ENODEV;
2963 2841
2964 amd64_read_mc_registers(pvt); 2842 amd64_read_mc_registers(pvt);
2965 2843
2966 ret = -ENODEV;
2967 if (pvt->ops->probe_valid_hardware) {
2968 err = pvt->ops->probe_valid_hardware(pvt);
2969 if (err)
2970 goto err_exit;
2971 }
2972
2973 /* 2844 /*
2974 * We need to determine how many memory channels there are. Then use 2845 * We need to determine how many memory channels there are. Then use
2975 * that information for calculating the size of the dynamic instance 2846 * that information for calculating the size of the dynamic instance
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index c6f359a85207..41bc561e5981 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -129,24 +129,22 @@
129 * sections 3.5.4 and 3.5.5 for more information. 129 * sections 3.5.4 and 3.5.5 for more information.
130 */ 130 */
131 131
132#define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ 132#define EDAC_AMD64_VERSION " Ver: 3.3.0 " __DATE__
133#define EDAC_MOD_STR "amd64_edac" 133#define EDAC_MOD_STR "amd64_edac"
134 134
135#define EDAC_MAX_NUMNODES 8 135#define EDAC_MAX_NUMNODES 8
136 136
137/* Extended Model from CPUID, for CPU Revision numbers */ 137/* Extended Model from CPUID, for CPU Revision numbers */
138#define OPTERON_CPU_LE_REV_C 0 138#define K8_REV_D 1
139#define OPTERON_CPU_REV_D 1 139#define K8_REV_E 2
140#define OPTERON_CPU_REV_E 2 140#define K8_REV_F 4
141
142/* NPT processors have the following Extended Models */
143#define OPTERON_CPU_REV_F 4
144#define OPTERON_CPU_REV_FA 5
145 141
146/* Hardware limit on ChipSelect rows per MC and processors per system */ 142/* Hardware limit on ChipSelect rows per MC and processors per system */
147#define MAX_CS_COUNT 8 143#define MAX_CS_COUNT 8
148#define DRAM_REG_COUNT 8 144#define DRAM_REG_COUNT 8
149 145
146#define ON true
147#define OFF false
150 148
151/* 149/*
152 * PCI-defined configuration space registers 150 * PCI-defined configuration space registers
@@ -241,7 +239,7 @@
241#define F10_DCHR_1 0x194 239#define F10_DCHR_1 0x194
242 240
243#define F10_DCHR_FOUR_RANK_DIMM BIT(18) 241#define F10_DCHR_FOUR_RANK_DIMM BIT(18)
244#define F10_DCHR_Ddr3Mode BIT(8) 242#define DDR3_MODE BIT(8)
245#define F10_DCHR_MblMode BIT(6) 243#define F10_DCHR_MblMode BIT(6)
246 244
247 245
@@ -382,14 +380,9 @@ enum {
382#define K8_NBCAP_CORES (BIT(12)|BIT(13)) 380#define K8_NBCAP_CORES (BIT(12)|BIT(13))
383#define K8_NBCAP_CHIPKILL BIT(4) 381#define K8_NBCAP_CHIPKILL BIT(4)
384#define K8_NBCAP_SECDED BIT(3) 382#define K8_NBCAP_SECDED BIT(3)
385#define K8_NBCAP_8_NODE BIT(2)
386#define K8_NBCAP_DUAL_NODE BIT(1)
387#define K8_NBCAP_DCT_DUAL BIT(0) 383#define K8_NBCAP_DCT_DUAL BIT(0)
388 384
389/* 385/* MSRs */
390 * MSR Regs
391 */
392#define K8_MSR_MCGCTL 0x017b
393#define K8_MSR_MCGCTL_NBE BIT(4) 386#define K8_MSR_MCGCTL_NBE BIT(4)
394 387
395#define K8_MSR_MC4CTL 0x0410 388#define K8_MSR_MC4CTL 0x0410
@@ -487,7 +480,6 @@ struct amd64_pvt {
487 /* Save old hw registers' values before we modified them */ 480 /* Save old hw registers' values before we modified them */
488 u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */ 481 u32 nbctl_mcgctl_saved; /* When true, following 2 are valid */
489 u32 old_nbctl; 482 u32 old_nbctl;
490 unsigned long old_mcgctl; /* per core on this node */
491 483
492 /* MC Type Index value: socket F vs Family 10h */ 484 /* MC Type Index value: socket F vs Family 10h */
493 u32 mc_type_index; 485 u32 mc_type_index;
@@ -495,6 +487,7 @@ struct amd64_pvt {
495 /* misc settings */ 487 /* misc settings */
496 struct flags { 488 struct flags {
497 unsigned long cf8_extcfg:1; 489 unsigned long cf8_extcfg:1;
490 unsigned long ecc_report:1;
498 } flags; 491 } flags;
499}; 492};
500 493
@@ -504,7 +497,6 @@ struct scrubrate {
504}; 497};
505 498
506extern struct scrubrate scrubrates[23]; 499extern struct scrubrate scrubrates[23];
507extern u32 revf_quad_ddr2_shift[16];
508extern const char *tt_msgs[4]; 500extern const char *tt_msgs[4];
509extern const char *ll_msgs[4]; 501extern const char *ll_msgs[4];
510extern const char *rrrr_msgs[16]; 502extern const char *rrrr_msgs[16];
@@ -534,17 +526,15 @@ extern struct mcidev_sysfs_attribute amd64_dbg_attrs[NUM_DBG_ATTRS],
534 * functions and per device encoding/decoding logic. 526 * functions and per device encoding/decoding logic.
535 */ 527 */
536struct low_ops { 528struct low_ops {
537 int (*probe_valid_hardware)(struct amd64_pvt *pvt); 529 int (*early_channel_count) (struct amd64_pvt *pvt);
538 int (*early_channel_count)(struct amd64_pvt *pvt); 530
539 531 u64 (*get_error_address) (struct mem_ctl_info *mci,
540 u64 (*get_error_address)(struct mem_ctl_info *mci, 532 struct err_regs *info);
541 struct err_regs *info); 533 void (*read_dram_base_limit) (struct amd64_pvt *pvt, int dram);
542 void (*read_dram_base_limit)(struct amd64_pvt *pvt, int dram); 534 void (*read_dram_ctl_register) (struct amd64_pvt *pvt);
543 void (*read_dram_ctl_register)(struct amd64_pvt *pvt); 535 void (*map_sysaddr_to_csrow) (struct mem_ctl_info *mci,
544 void (*map_sysaddr_to_csrow)(struct mem_ctl_info *mci, 536 struct err_regs *info, u64 SystemAddr);
545 struct err_regs *info, 537 int (*dbam_to_cs) (struct amd64_pvt *pvt, int cs_mode);
546 u64 SystemAddr);
547 int (*dbam_map_to_pages)(struct amd64_pvt *pvt, int dram_map);
548}; 538};
549 539
550struct amd64_family_type { 540struct amd64_family_type {
@@ -566,6 +556,22 @@ static inline struct low_ops *family_ops(int index)
566 return &amd64_family_types[index].ops; 556 return &amd64_family_types[index].ops;
567} 557}
568 558
559static inline int amd64_read_pci_cfg_dword(struct pci_dev *pdev, int offset,
560 u32 *val, const char *func)
561{
562 int err = 0;
563
564 err = pci_read_config_dword(pdev, offset, val);
565 if (err)
566 amd64_printk(KERN_WARNING, "%s: error reading F%dx%x.\n",
567 func, PCI_FUNC(pdev->devfn), offset);
568
569 return err;
570}
571
572#define amd64_read_pci_cfg(pdev, offset, val) \
573 amd64_read_pci_cfg_dword(pdev, offset, val, __func__)
574
569/* 575/*
570 * For future CPU versions, verify the following as new 'slow' rates appear and 576 * For future CPU versions, verify the following as new 'slow' rates appear and
571 * modify the necessary skip values for the supported CPU. 577 * modify the necessary skip values for the supported CPU.
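
The new amd64_read_pci_cfg() wrapper folds the repetitive pci_read_config_dword() error check into one place; every call site in the .c file shrinks from three lines to one, and the warning now names the failing function and register automatically. Before/after at a typical call site, reconstructed from the hunks above:

    /* before: each caller carried its own check */
    err = pci_read_config_dword(pvt->misc_f3_ctl, K8_NBCFG, &value);
    if (err)
        debugf0("Reading K8_NBCFG failed\n");

    /* after: the macro passes __func__ along and reports failures itself */
    amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
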
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 12f355cafdbe..001b2e797fb3 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -74,6 +74,7 @@
74 74
75#ifdef CONFIG_EDAC_DEBUG 75#ifdef CONFIG_EDAC_DEBUG
76extern int edac_debug_level; 76extern int edac_debug_level;
77extern const char *edac_mem_types[];
77 78
78#ifndef CONFIG_EDAC_DEBUG_VERBOSE 79#ifndef CONFIG_EDAC_DEBUG_VERBOSE
79#define edac_debug_printk(level, fmt, arg...) \ 80#define edac_debug_printk(level, fmt, arg...) \
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index b629c41756f0..3630308e7b81 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -76,6 +76,30 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
76 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 76 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
77} 77}
78 78
79/*
 80 * Keep these in sync with the enum mem_type
81 */
82const char *edac_mem_types[] = {
83 "Empty csrow",
84 "Reserved csrow type",
85 "Unknown csrow type",
86 "Fast page mode RAM",
87 "Extended data out RAM",
88 "Burst Extended data out RAM",
89 "Single data rate SDRAM",
90 "Registered single data rate SDRAM",
91 "Double data rate SDRAM",
92 "Registered Double data rate SDRAM",
93 "Rambus DRAM",
94 "Unbuffered DDR2 RAM",
95 "Fully buffered DDR2",
96 "Registered DDR2 RAM",
97 "Rambus XDR",
98 "Unbuffered DDR3 RAM",
99 "Registered DDR3 RAM",
100};
101EXPORT_SYMBOL_GPL(edac_mem_types);
102
79#endif /* CONFIG_EDAC_DEBUG */ 103#endif /* CONFIG_EDAC_DEBUG */
80 104
81/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 105/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'.
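
With the table exported, any EDAC debug path can turn an enum mem_type value into a readable name. A hypothetical one-liner (csrow and i are assumed locals; the index must track enum mem_type exactly, as the comment above the table warns):

    debugf1("  csrow %d: memory type: %s\n", i, edac_mem_types[csrow->mtype]);
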
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 689cc6a6214d..c693fcc2213c 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -306,7 +306,7 @@ void amd_decode_nb_mce(int node_id, struct err_regs *regs, int handle_errors)
306 * value encoding has changed so interpret those differently 306 * value encoding has changed so interpret those differently
307 */ 307 */
308 if ((boot_cpu_data.x86 == 0x10) && 308 if ((boot_cpu_data.x86 == 0x10) &&
309 (boot_cpu_data.x86_model > 8)) { 309 (boot_cpu_data.x86_model > 7)) {
310 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL) 310 if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
311 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf)); 311 pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
312 } else { 312 } else {
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ae4556f0c0c1..96768e160866 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2218,6 +2218,13 @@ static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
2218 page = payload >> PAGE_SHIFT; 2218 page = payload >> PAGE_SHIFT;
2219 offset = payload & ~PAGE_MASK; 2219 offset = payload & ~PAGE_MASK;
2220 rest = p->payload_length; 2220 rest = p->payload_length;
2221 /*
2222 * The controllers I've tested have not worked correctly when
2223 * second_req_count is zero. Rather than do something we know won't
2225 * work, return an error.
2225 */
2226 if (rest == 0)
2227 return -EINVAL;
2221 2228
2222 /* FIXME: make packet-per-buffer/dual-buffer a context option */ 2229 /* FIXME: make packet-per-buffer/dual-buffer a context option */
2223 while (rest > 0) { 2230 while (rest > 0) {
@@ -2271,7 +2278,7 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2271 unsigned long payload) 2278 unsigned long payload)
2272{ 2279{
2273 struct iso_context *ctx = container_of(base, struct iso_context, base); 2280 struct iso_context *ctx = container_of(base, struct iso_context, base);
2274 struct descriptor *d = NULL, *pd = NULL; 2281 struct descriptor *d, *pd;
2275 struct fw_iso_packet *p = packet; 2282 struct fw_iso_packet *p = packet;
2276 dma_addr_t d_bus, page_bus; 2283 dma_addr_t d_bus, page_bus;
2277 u32 z, header_z, rest; 2284 u32 z, header_z, rest;
@@ -2309,8 +2316,9 @@ static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
2309 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d))); 2316 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
2310 2317
2311 rest = payload_per_buffer; 2318 rest = payload_per_buffer;
2319 pd = d;
2312 for (j = 1; j < z; j++) { 2320 for (j = 1; j < z; j++) {
2313 pd = d + j; 2321 pd++;
2314 pd->control = cpu_to_le16(DESCRIPTOR_STATUS | 2322 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
2315 DESCRIPTOR_INPUT_MORE); 2323 DESCRIPTOR_INPUT_MORE);
2316 2324
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 3c8827a7aabd..470ef6779db3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -15,7 +15,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
15 15
16drm-$(CONFIG_COMPAT) += drm_ioc32.o 16drm-$(CONFIG_COMPAT) += drm_ioc32.o
17 17
18drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o 18drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
19 19
20obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o 20obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
21 21
@@ -31,3 +31,5 @@ obj-$(CONFIG_DRM_I915) += i915/
31obj-$(CONFIG_DRM_SIS) += sis/ 31obj-$(CONFIG_DRM_SIS) += sis/
32obj-$(CONFIG_DRM_SAVAGE)+= savage/ 32obj-$(CONFIG_DRM_SAVAGE)+= savage/
33obj-$(CONFIG_DRM_VIA) +=via/ 33obj-$(CONFIG_DRM_VIA) +=via/
34obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
35obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3f7c500b2115..5124401f266a 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
125DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, 125DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
126 drm_tv_subconnector_enum_list) 126 drm_tv_subconnector_enum_list)
127 127
128static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
129 { DRM_MODE_DIRTY_OFF, "Off" },
130 { DRM_MODE_DIRTY_ON, "On" },
131 { DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
132};
133
134DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
135 drm_dirty_info_enum_list)
136
128struct drm_conn_prop_enum_list { 137struct drm_conn_prop_enum_list {
129 int type; 138 int type;
130 char *name; 139 char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
247 mutex_unlock(&dev->mode_config.idr_mutex); 256 mutex_unlock(&dev->mode_config.idr_mutex);
248} 257}
249 258
250void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) 259struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
260 uint32_t id, uint32_t type)
251{ 261{
252 struct drm_mode_object *obj = NULL; 262 struct drm_mode_object *obj = NULL;
253 263
@@ -802,6 +812,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
802EXPORT_SYMBOL(drm_mode_create_dithering_property); 812EXPORT_SYMBOL(drm_mode_create_dithering_property);
803 813
804/** 814/**
 815 * drm_mode_create_dirty_info_property - create dirty info property
816 * @dev: DRM device
817 *
 818 * Called by a driver the first time it is needed; must be attached to desired
819 * connectors.
820 */
821int drm_mode_create_dirty_info_property(struct drm_device *dev)
822{
823 struct drm_property *dirty_info;
824 int i;
825
826 if (dev->mode_config.dirty_info_property)
827 return 0;
828
829 dirty_info =
830 drm_property_create(dev, DRM_MODE_PROP_ENUM |
831 DRM_MODE_PROP_IMMUTABLE,
832 "dirty",
833 ARRAY_SIZE(drm_dirty_info_enum_list));
834 for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
835 drm_property_add_enum(dirty_info, i,
836 drm_dirty_info_enum_list[i].type,
837 drm_dirty_info_enum_list[i].name);
838 dev->mode_config.dirty_info_property = dirty_info;
839
840 return 0;
841}
842EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
843
844/**
805 * drm_mode_config_init - initialize DRM mode_configuration structure 845 * drm_mode_config_init - initialize DRM mode_configuration structure
806 * @dev: DRM device 846 * @dev: DRM device
807 * 847 *
@@ -1753,6 +1793,71 @@ out:
1753 return ret; 1793 return ret;
1754} 1794}
1755 1795
1796int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
1797 void *data, struct drm_file *file_priv)
1798{
1799 struct drm_clip_rect __user *clips_ptr;
1800 struct drm_clip_rect *clips = NULL;
1801 struct drm_mode_fb_dirty_cmd *r = data;
1802 struct drm_mode_object *obj;
1803 struct drm_framebuffer *fb;
1804 unsigned flags;
1805 int num_clips;
1806 int ret = 0;
1807
1808 mutex_lock(&dev->mode_config.mutex);
1809 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
1810 if (!obj) {
1811 DRM_ERROR("invalid framebuffer id\n");
1812 ret = -EINVAL;
1813 goto out_err1;
1814 }
1815 fb = obj_to_fb(obj);
1816
1817 num_clips = r->num_clips;
1818 clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
1819
1820 if (!num_clips != !clips_ptr) {
1821 ret = -EINVAL;
1822 goto out_err1;
1823 }
1824
1825 flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
1826
1827 /* If userspace annotates copy, clips must come in pairs */
1828 if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
1829 ret = -EINVAL;
1830 goto out_err1;
1831 }
1832
1833 if (num_clips && clips_ptr) {
1834 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
1835 if (!clips) {
1836 ret = -ENOMEM;
1837 goto out_err1;
1838 }
1839
1840 ret = copy_from_user(clips, clips_ptr,
1841 num_clips * sizeof(*clips));
1842 if (ret)
1843 goto out_err2;
1844 }
1845
1846 if (fb->funcs->dirty) {
1847 ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
1848 } else {
1849 ret = -ENOSYS;
1850 goto out_err2;
1851 }
1852
1853out_err2:
1854 kfree(clips);
1855out_err1:
1856 mutex_unlock(&dev->mode_config.mutex);
1857 return ret;
1858}
1859
1860
1756/** 1861/**
1757 * drm_fb_release - remove and free the FBs on this file 1862 * drm_fb_release - remove and free the FBs on this file
1758 * @filp: file * from the ioctl 1863 * @filp: file * from the ioctl
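
From user space, the new DIRTYFB ioctl is driven with a struct drm_mode_fb_dirty_cmd and an optional clip list. A minimal sketch (fd and fb_id are assumed to exist already; drmIoctl() is libdrm's EINTR-safe wrapper):

    struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
    struct drm_mode_fb_dirty_cmd cmd = {
        .fb_id     = fb_id,
        .num_clips = 1,
        .clips_ptr = (uint64_t)(unsigned long)&clip,
    };

    /* fails with -ENOSYS when the driver has no fb->funcs->dirty hook */
    if (drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd))
        perror("DIRTYFB");
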
@@ -2478,3 +2583,72 @@ out:
2478 mutex_unlock(&dev->mode_config.mutex); 2583 mutex_unlock(&dev->mode_config.mutex);
2479 return ret; 2584 return ret;
2480} 2585}
2586
2587int drm_mode_page_flip_ioctl(struct drm_device *dev,
2588 void *data, struct drm_file *file_priv)
2589{
2590 struct drm_mode_crtc_page_flip *page_flip = data;
2591 struct drm_mode_object *obj;
2592 struct drm_crtc *crtc;
2593 struct drm_framebuffer *fb;
2594 struct drm_pending_vblank_event *e = NULL;
2595 unsigned long flags;
2596 int ret = -EINVAL;
2597
2598 if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
2599 page_flip->reserved != 0)
2600 return -EINVAL;
2601
2602 mutex_lock(&dev->mode_config.mutex);
2603 obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
2604 if (!obj)
2605 goto out;
2606 crtc = obj_to_crtc(obj);
2607
2608 if (crtc->funcs->page_flip == NULL)
2609 goto out;
2610
2611 obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
2612 if (!obj)
2613 goto out;
2614 fb = obj_to_fb(obj);
2615
2616 if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
2617 ret = -ENOMEM;
2618 spin_lock_irqsave(&dev->event_lock, flags);
2619 if (file_priv->event_space < sizeof e->event) {
2620 spin_unlock_irqrestore(&dev->event_lock, flags);
2621 goto out;
2622 }
2623 file_priv->event_space -= sizeof e->event;
2624 spin_unlock_irqrestore(&dev->event_lock, flags);
2625
2626 e = kzalloc(sizeof *e, GFP_KERNEL);
2627 if (e == NULL) {
2628 spin_lock_irqsave(&dev->event_lock, flags);
2629 file_priv->event_space += sizeof e->event;
2630 spin_unlock_irqrestore(&dev->event_lock, flags);
2631 goto out;
2632 }
2633
2634 e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
2635 e->event.base.length = sizeof e->event;
2636 e->event.user_data = page_flip->user_data;
2637 e->base.event = &e->event.base;
2638 e->base.file_priv = file_priv;
2639 e->base.destroy =
2640 (void (*) (struct drm_pending_event *)) kfree;
2641 }
2642
2643 ret = crtc->funcs->page_flip(crtc, fb, e);
2644 if (ret) {
2645 spin_lock_irqsave(&dev->event_lock, flags);
2646 file_priv->event_space += sizeof e->event;
2647 spin_unlock_irqrestore(&dev->event_lock, flags);
2648 kfree(e);
2649 }
2650
2651out:
2652 mutex_unlock(&dev->mode_config.mutex);
2653 return ret;
2654}
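
Driving the flip from user space: fill in a struct drm_mode_crtc_page_flip, request the completion event, and pick the event up later over read() on the DRM fd (see the drm_read()/drm_poll() hunks in drm_fops.c below). Sketch only; crtc_id, new_fb_id and fd are assumptions:

    struct drm_mode_crtc_page_flip flip = {
        .crtc_id   = crtc_id,
        .fb_id     = new_fb_id,
        .flags     = DRM_MODE_PAGE_FLIP_EVENT, /* deliver DRM_EVENT_FLIP_COMPLETE */
        .user_data = 0xdeadbeef,               /* echoed back in the event */
    };

    if (drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip))
        perror("PAGE_FLIP");
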
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index bbfd110a7168..4231d6db72ec 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
109 109
110 count = (*connector_funcs->get_modes)(connector); 110 count = (*connector_funcs->get_modes)(connector);
111 if (!count) { 111 if (!count) {
112 count = drm_add_modes_noedid(connector, 800, 600); 112 count = drm_add_modes_noedid(connector, 1024, 768);
113 if (!count) 113 if (!count)
114 return 0; 114 return 0;
115 } 115 }
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
1020{ 1020{
1021 int count = 0; 1021 int count = 0;
1022 1022
1023 /* disable all the possible outputs/crtcs before entering KMS mode */
1024 drm_helper_disable_unused_functions(dev);
1025
1023 drm_fb_helper_parse_command_line(dev); 1026 drm_fb_helper_parse_command_line(dev);
1024 1027
1025 count = drm_helper_probe_connector_modes(dev, 1028 count = drm_helper_probe_connector_modes(dev,
diff --git a/drivers/gpu/drm/i915/intel_dp_i2c.c b/drivers/gpu/drm/drm_dp_i2c_helper.c
index a63b6f57d2d4..548887c8506f 100644
--- a/drivers/gpu/drm/i915/intel_dp_i2c.c
+++ b/drivers/gpu/drm/drm_dp_i2c_helper.c
@@ -28,84 +28,20 @@
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <linux/sched.h> 29#include <linux/sched.h>
30#include <linux/i2c.h> 30#include <linux/i2c.h>
31#include "intel_dp.h" 31#include "drm_dp_helper.h"
32#include "drmP.h" 32#include "drmP.h"
33 33
34/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */ 34/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
35
36#define MODE_I2C_START 1
37#define MODE_I2C_WRITE 2
38#define MODE_I2C_READ 4
39#define MODE_I2C_STOP 8
40
41static int 35static int
42i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode, 36i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
43 uint8_t write_byte, uint8_t *read_byte) 37 uint8_t write_byte, uint8_t *read_byte)
44{ 38{
45 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data; 39 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
46 uint16_t address = algo_data->address;
47 uint8_t msg[5];
48 uint8_t reply[2];
49 int msg_bytes;
50 int reply_bytes;
51 int ret; 40 int ret;
52 41
53 /* Set up the command byte */ 42 ret = (*algo_data->aux_ch)(adapter, mode,
54 if (mode & MODE_I2C_READ) 43 write_byte, read_byte);
55 msg[0] = AUX_I2C_READ << 4; 44 return ret;
56 else
57 msg[0] = AUX_I2C_WRITE << 4;
58
59 if (!(mode & MODE_I2C_STOP))
60 msg[0] |= AUX_I2C_MOT << 4;
61
62 msg[1] = address >> 8;
63 msg[2] = address;
64
65 switch (mode) {
66 case MODE_I2C_WRITE:
67 msg[3] = 0;
68 msg[4] = write_byte;
69 msg_bytes = 5;
70 reply_bytes = 1;
71 break;
72 case MODE_I2C_READ:
73 msg[3] = 0;
74 msg_bytes = 4;
75 reply_bytes = 2;
76 break;
77 default:
78 msg_bytes = 3;
79 reply_bytes = 1;
80 break;
81 }
82
83 for (;;) {
84 ret = (*algo_data->aux_ch)(adapter,
85 msg, msg_bytes,
86 reply, reply_bytes);
87 if (ret < 0) {
88 DRM_DEBUG("aux_ch failed %d\n", ret);
89 return ret;
90 }
91 switch (reply[0] & AUX_I2C_REPLY_MASK) {
92 case AUX_I2C_REPLY_ACK:
93 if (mode == MODE_I2C_READ) {
94 *read_byte = reply[1];
95 }
96 return reply_bytes - 1;
97 case AUX_I2C_REPLY_NACK:
98 DRM_DEBUG("aux_ch nack\n");
99 return -EREMOTEIO;
100 case AUX_I2C_REPLY_DEFER:
101 DRM_DEBUG("aux_ch defer\n");
102 udelay(100);
103 break;
104 default:
105 DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
106 return -EREMOTEIO;
107 }
108 }
109} 45}
110 46
111/* 47/*
@@ -224,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
224 if (ret >= 0) 160 if (ret >= 0)
225 ret = num; 161 ret = num;
226 i2c_algo_dp_aux_stop(adapter, reading); 162 i2c_algo_dp_aux_stop(adapter, reading);
227 DRM_DEBUG("dp_aux_xfer return %d\n", ret); 163 DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
228 return ret; 164 return ret;
229} 165}
230 166
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a75ca63deea6..ff2f1042cb44 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW), 145 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW), 146 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW), 147 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
148 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
148}; 150};
149 151
150#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) 152#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
@@ -366,6 +368,29 @@ module_init(drm_core_init);
366module_exit(drm_core_exit); 368module_exit(drm_core_exit);
367 369
368/** 370/**
 371 * Copy an IOCTL return string to user space
372 */
373static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
374{
375 int len;
376
377 /* don't overflow userbuf */
378 len = strlen(value);
379 if (len > *buf_len)
380 len = *buf_len;
381
382 /* let userspace know exact length of driver value (which could be
383 * larger than the userspace-supplied buffer) */
384 *buf_len = strlen(value);
385
386 /* finally, try filling in the userbuf */
387 if (len && buf)
388 if (copy_to_user(buf, value, len))
389 return -EFAULT;
390 return 0;
391}
392
393/**
369 * Get version information 394 * Get version information
370 * 395 *
371 * \param inode device inode. 396 * \param inode device inode.
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
380 struct drm_file *file_priv) 405 struct drm_file *file_priv)
381{ 406{
382 struct drm_version *version = data; 407 struct drm_version *version = data;
383 int len; 408 int err;
384 409
385 version->version_major = dev->driver->major; 410 version->version_major = dev->driver->major;
386 version->version_minor = dev->driver->minor; 411 version->version_minor = dev->driver->minor;
387 version->version_patchlevel = dev->driver->patchlevel; 412 version->version_patchlevel = dev->driver->patchlevel;
388 DRM_COPY(version->name, dev->driver->name); 413 err = drm_copy_field(version->name, &version->name_len,
389 DRM_COPY(version->date, dev->driver->date); 414 dev->driver->name);
390 DRM_COPY(version->desc, dev->driver->desc); 415 if (!err)
391 416 err = drm_copy_field(version->date, &version->date_len,
392 return 0; 417 dev->driver->date);
418 if (!err)
419 err = drm_copy_field(version->desc, &version->desc_len,
420 dev->driver->desc);
421
422 return err;
393} 423}
394 424
395/** 425/**
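
Because drm_copy_field() writes the true string length back even when the user buffer is short (or NULL), user space can size its buffers in two passes, which is the classic drmGetVersion() pattern; roughly:

    struct drm_version v = { 0 };

    /* pass 1: NULL buffers, kernel reports the real lengths (checks omitted) */
    drmIoctl(fd, DRM_IOCTL_VERSION, &v);

    v.name = malloc(v.name_len + 1);
    v.date = malloc(v.date_len + 1);
    v.desc = malloc(v.desc_len + 1);

    /* pass 2: now the strings fit and are copied in full */
    drmIoctl(fd, DRM_IOCTL_VERSION, &v);
    v.name[v.name_len] = '\0';
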
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index b54ba63d506e..c39b26f1abed 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
123 */ 123 */
124static bool edid_is_valid(struct edid *edid) 124static bool edid_is_valid(struct edid *edid)
125{ 125{
126 int i; 126 int i, score = 0;
127 u8 csum = 0; 127 u8 csum = 0;
128 u8 *raw_edid = (u8 *)edid; 128 u8 *raw_edid = (u8 *)edid;
129 129
130 if (memcmp(edid->header, edid_header, sizeof(edid_header))) 130 for (i = 0; i < sizeof(edid_header); i++)
131 goto bad; 131 if (raw_edid[i] == edid_header[i])
132 if (edid->version != 1) { 132 score++;
133 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version); 133
134 if (score == 8) ;
135 else if (score >= 6) {
136 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
137 memcpy(raw_edid, edid_header, sizeof(edid_header));
138 } else
134 goto bad; 139 goto bad;
135 }
136 if (edid->revision > 4)
137 DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
138 140
139 for (i = 0; i < EDID_LENGTH; i++) 141 for (i = 0; i < EDID_LENGTH; i++)
140 csum += raw_edid[i]; 142 csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
143 goto bad; 145 goto bad;
144 } 146 }
145 147
148 if (edid->version != 1) {
149 DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
150 goto bad;
151 }
152
153 if (edid->revision > 4)
154 DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
155
146 return 1; 156 return 1;
147 157
148bad: 158bad:
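
The header check above replaces a strict memcmp() with a byte score: 8 of 8 matching bytes passes outright, 6 or 7 is treated as recoverable corruption and the canonical header is written back over the raw bytes, anything lower rejects the block. The scoring step in isolation (helper name is illustrative):

    /* canonical EDID magic: 00 ff ff ff ff ff ff 00 */
    static const u8 edid_header[] = {
        0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
    };

    static int edid_header_score(const u8 *raw)
    {
        int i, score = 0;

        for (i = 0; i < 8; i++)
            score += (raw[i] == edid_header[i]);

        return score; /* >= 6: repair via memcpy(); < 6: reject */
    }
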
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
481 3048, 3536, 0, 1600, 1603, 1609, 1682, 0, 491 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
482 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, 492 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
483}; 493};
494static const int drm_num_dmt_modes =
495 sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
484 496
485static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, 497static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
486 int hsize, int vsize, int fresh) 498 int hsize, int vsize, int fresh)
487{ 499{
488 int i, count; 500 int i;
489 struct drm_display_mode *ptr, *mode; 501 struct drm_display_mode *ptr, *mode;
490 502
491 count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
492 mode = NULL; 503 mode = NULL;
493 for (i = 0; i < count; i++) { 504 for (i = 0; i < drm_num_dmt_modes; i++) {
494 ptr = &drm_dmt_modes[i]; 505 ptr = &drm_dmt_modes[i];
495 if (hsize == ptr->hdisplay && 506 if (hsize == ptr->hdisplay &&
496 vsize == ptr->vdisplay && 507 vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
834 return modes; 845 return modes;
835} 846}
836 847
848/*
849 * XXX fix this for:
850 * - GTF secondary curve formula
851 * - EDID 1.4 range offsets
852 * - CVT extended bits
853 */
854static bool
855mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
856{
857 struct detailed_data_monitor_range *range;
858 int hsync, vrefresh;
859
860 range = &timing->data.other_data.data.range;
861
862 hsync = drm_mode_hsync(mode);
863 vrefresh = drm_mode_vrefresh(mode);
864
865 if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
866 return false;
867
868 if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
869 return false;
870
871 if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
872 /* be forgiving since it's in units of 10MHz */
873 int max_clock = range->pixel_clock_mhz * 10 + 9;
874 max_clock *= 1000;
875 if (mode->clock > max_clock)
876 return false;
877 }
878
879 return true;
880}
881
882/*
883 * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
884 * need to account for them.
885 */
886static int drm_gtf_modes_for_range(struct drm_connector *connector,
887 struct detailed_timing *timing)
888{
889 int i, modes = 0;
890 struct drm_display_mode *newmode;
891 struct drm_device *dev = connector->dev;
892
893 for (i = 0; i < drm_num_dmt_modes; i++) {
894 if (mode_in_range(drm_dmt_modes + i, timing)) {
895 newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
896 if (newmode) {
897 drm_mode_probed_add(connector, newmode);
898 modes++;
899 }
900 }
901 }
902
903 return modes;
904}
905
906static int drm_cvt_modes(struct drm_connector *connector,
907 struct detailed_timing *timing)
908{
909 int i, j, modes = 0;
910 struct drm_display_mode *newmode;
911 struct drm_device *dev = connector->dev;
912 struct cvt_timing *cvt;
913 const int rates[] = { 60, 85, 75, 60, 50 };
914
915 for (i = 0; i < 4; i++) {
916 int width, height;
917 cvt = &(timing->data.other_data.data.cvt[i]);
918
919 height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
920 switch (cvt->code[1] & 0xc0) {
921 case 0x00:
922 width = height * 4 / 3;
923 break;
924 case 0x40:
925 width = height * 16 / 9;
926 break;
927 case 0x80:
928 width = height * 16 / 10;
929 break;
930 case 0xc0:
931 width = height * 15 / 9;
932 break;
933 }
934
935 for (j = 1; j < 5; j++) {
936 if (cvt->code[2] & (1 << j)) {
937 newmode = drm_cvt_mode(dev, width, height,
938 rates[j], j == 0,
939 false, false);
940 if (newmode) {
941 drm_mode_probed_add(connector, newmode);
942 modes++;
943 }
944 }
945 }
946 }
947
948 return modes;
949}
950
951static int add_detailed_modes(struct drm_connector *connector,
952 struct detailed_timing *timing,
953 struct edid *edid, u32 quirks, int preferred)
954{
955 int i, modes = 0;
956 struct detailed_non_pixel *data = &timing->data.other_data;
957 int timing_level = standard_timing_level(edid);
958 int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
959 struct drm_display_mode *newmode;
960 struct drm_device *dev = connector->dev;
961
962 if (timing->pixel_clock) {
963 newmode = drm_mode_detailed(dev, edid, timing, quirks);
964 if (!newmode)
965 return 0;
966
967 if (preferred)
968 newmode->type |= DRM_MODE_TYPE_PREFERRED;
969
970 drm_mode_probed_add(connector, newmode);
971 return 1;
972 }
973
974 /* other timing types */
975 switch (data->type) {
976 case EDID_DETAIL_MONITOR_RANGE:
977 if (gtf)
978 modes += drm_gtf_modes_for_range(connector, timing);
979 break;
980 case EDID_DETAIL_STD_MODES:
981 /* Six modes per detailed section */
982 for (i = 0; i < 6; i++) {
983 struct std_timing *std;
984 struct drm_display_mode *newmode;
985
986 std = &data->data.timings[i];
987 newmode = drm_mode_std(dev, std, edid->revision,
988 timing_level);
989 if (newmode) {
990 drm_mode_probed_add(connector, newmode);
991 modes++;
992 }
993 }
994 break;
995 case EDID_DETAIL_CVT_3BYTE:
996 modes += drm_cvt_modes(connector, timing);
997 break;
998 default:
999 break;
1000 }
1001
1002 return modes;
1003}
1004
837/** 1005/**
838 * add_detailed_modes - get detailed mode info from EDID data 1006 * add_detailed_info - get detailed mode info from EDID data
839 * @connector: attached connector 1007 * @connector: attached connector
840 * @edid: EDID block to scan 1008 * @edid: EDID block to scan
841 * @quirks: quirks to apply 1009 * @quirks: quirks to apply
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
846static int add_detailed_info(struct drm_connector *connector, 1014static int add_detailed_info(struct drm_connector *connector,
847 struct edid *edid, u32 quirks) 1015 struct edid *edid, u32 quirks)
848{ 1016{
849 struct drm_device *dev = connector->dev; 1017 int i, modes = 0;
850 int i, j, modes = 0;
851 int timing_level;
852
853 timing_level = standard_timing_level(edid);
854 1018
855 for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { 1019 for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
856 struct detailed_timing *timing = &edid->detailed_timings[i]; 1020 struct detailed_timing *timing = &edid->detailed_timings[i];
857 struct detailed_non_pixel *data = &timing->data.other_data; 1021 int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
858 struct drm_display_mode *newmode;
859
860 /* X server check is version 1.1 or higher */
861 if (edid->version == 1 && edid->revision >= 1 &&
862 !timing->pixel_clock) {
863 /* Other timing or info */
864 switch (data->type) {
865 case EDID_DETAIL_MONITOR_SERIAL:
866 break;
867 case EDID_DETAIL_MONITOR_STRING:
868 break;
869 case EDID_DETAIL_MONITOR_RANGE:
870 /* Get monitor range data */
871 break;
872 case EDID_DETAIL_MONITOR_NAME:
873 break;
874 case EDID_DETAIL_MONITOR_CPDATA:
875 break;
876 case EDID_DETAIL_STD_MODES:
877 for (j = 0; j < 6; i++) {
878 struct std_timing *std;
879 struct drm_display_mode *newmode;
880
881 std = &data->data.timings[j];
882 newmode = drm_mode_std(dev, std,
883 edid->revision,
884 timing_level);
885 if (newmode) {
886 drm_mode_probed_add(connector, newmode);
887 modes++;
888 }
889 }
890 break;
891 default:
892 break;
893 }
894 } else {
895 newmode = drm_mode_detailed(dev, edid, timing, quirks);
896 if (!newmode)
897 continue;
898 1022
899 /* First detailed mode is preferred */ 1023 /* In 1.0, only timings are allowed */
900 if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING)) 1024 if (!timing->pixel_clock && edid->version == 1 &&
901 newmode->type |= DRM_MODE_TYPE_PREFERRED; 1025 edid->revision == 0)
902 drm_mode_probed_add(connector, newmode); 1026 continue;
903 1027
904 modes++; 1028 modes += add_detailed_modes(connector, timing, edid, quirks,
905 } 1029 preferred);
906 } 1030 }
907 1031
908 return modes; 1032 return modes;
909} 1033}
1034
910/** 1035/**
 911 * add_detailed_info_eedid - get detailed mode info from additional timing 1036 * add_detailed_info_eedid - get detailed mode info from additional timing
912 * EDID block 1037 * EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
920static int add_detailed_info_eedid(struct drm_connector *connector, 1045static int add_detailed_info_eedid(struct drm_connector *connector,
921 struct edid *edid, u32 quirks) 1046 struct edid *edid, u32 quirks)
922{ 1047{
923 struct drm_device *dev = connector->dev; 1048 int i, modes = 0;
924 int i, j, modes = 0;
925 char *edid_ext = NULL; 1049 char *edid_ext = NULL;
926 struct detailed_timing *timing; 1050 struct detailed_timing *timing;
927 struct detailed_non_pixel *data;
928 struct drm_display_mode *newmode;
929 int edid_ext_num; 1051 int edid_ext_num;
930 int start_offset, end_offset; 1052 int start_offset, end_offset;
931 int timing_level; 1053 int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
976 for (i = start_offset; i < end_offset; 1098 for (i = start_offset; i < end_offset;
977 i += sizeof(struct detailed_timing)) { 1099 i += sizeof(struct detailed_timing)) {
978 timing = (struct detailed_timing *)(edid_ext + i); 1100 timing = (struct detailed_timing *)(edid_ext + i);
979 data = &timing->data.other_data; 1101 modes += add_detailed_modes(connector, timing, edid, quirks, 0);
980 /* Detailed mode timing */
981 if (timing->pixel_clock) {
982 newmode = drm_mode_detailed(dev, edid, timing, quirks);
983 if (!newmode)
984 continue;
985
986 drm_mode_probed_add(connector, newmode);
987
988 modes++;
989 continue;
990 }
991
992 /* Other timing or info */
993 switch (data->type) {
994 case EDID_DETAIL_MONITOR_SERIAL:
995 break;
996 case EDID_DETAIL_MONITOR_STRING:
997 break;
998 case EDID_DETAIL_MONITOR_RANGE:
999 /* Get monitor range data */
1000 break;
1001 case EDID_DETAIL_MONITOR_NAME:
1002 break;
1003 case EDID_DETAIL_MONITOR_CPDATA:
1004 break;
1005 case EDID_DETAIL_STD_MODES:
1006 /* Five modes per detailed section */
1007 for (j = 0; j < 5; i++) {
1008 struct std_timing *std;
1009 struct drm_display_mode *newmode;
1010
1011 std = &data->data.timings[j];
1012 newmode = drm_mode_std(dev, std,
1013 edid->revision,
1014 timing_level);
1015 if (newmode) {
1016 drm_mode_probed_add(connector, newmode);
1017 modes++;
1018 }
1019 }
1020 break;
1021 default:
1022 break;
1023 }
1024 } 1102 }
1025 1103
1026 return modes; 1104 return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
1066 struct i2c_adapter *adapter, 1144 struct i2c_adapter *adapter,
1067 char *buf, int len) 1145 char *buf, int len)
1068{ 1146{
1069 int ret; 1147 int i;
1070 1148
1071 ret = drm_do_probe_ddc_edid(adapter, buf, len); 1149 for (i = 0; i < 4; i++) {
1072 if (ret != 0) { 1150 if (drm_do_probe_ddc_edid(adapter, buf, len))
1073 goto end; 1151 return -1;
1074 } 1152 if (edid_is_valid((struct edid *)buf))
1075 if (!edid_is_valid((struct edid *)buf)) { 1153 return 0;
1076 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
1077 drm_get_connector_name(connector));
1078 ret = -1;
1079 } 1154 }
1080end: 1155
1081 return ret; 1156 /* repeated checksum failures; warn, but carry on */
1157 dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
1158 drm_get_connector_name(connector));
1159 return -1;
1082} 1160}
1083 1161
1084/** 1162/**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
1296 ptr->vdisplay > vdisplay) 1374 ptr->vdisplay > vdisplay)
1297 continue; 1375 continue;
1298 } 1376 }
1377 if (drm_mode_vrefresh(ptr) > 61)
1378 continue;
1299 mode = drm_mode_duplicate(dev, ptr); 1379 mode = drm_mode_duplicate(dev, ptr);
1300 if (mode) { 1380 if (mode) {
1301 drm_mode_probed_add(connector, mode); 1381 drm_mode_probed_add(connector, mode);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 65ef011fa8ba..1b49fa055f4f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
373 mutex_unlock(&dev->mode_config.mutex); 373 mutex_unlock(&dev->mode_config.mutex);
374 } 374 }
375 } 375 }
376 if (dpms_mode == DRM_MODE_DPMS_OFF) { 376 mutex_lock(&dev->mode_config.mutex);
377 mutex_lock(&dev->mode_config.mutex); 377 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
378 crtc_funcs->dpms(crtc, dpms_mode); 378 mutex_unlock(&dev->mode_config.mutex);
379 mutex_unlock(&dev->mode_config.mutex);
380 }
381 } 379 }
382 } 380 }
383} 381}
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
385int drm_fb_helper_blank(int blank, struct fb_info *info) 383int drm_fb_helper_blank(int blank, struct fb_info *info)
386{ 384{
387 switch (blank) { 385 switch (blank) {
386 /* Display: On; HSync: On, VSync: On */
388 case FB_BLANK_UNBLANK: 387 case FB_BLANK_UNBLANK:
389 drm_fb_helper_on(info); 388 drm_fb_helper_on(info);
390 break; 389 break;
390 /* Display: Off; HSync: On, VSync: On */
391 case FB_BLANK_NORMAL: 391 case FB_BLANK_NORMAL:
392 drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); 392 drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
393 break; 393 break;
394 /* Display: Off; HSync: Off, VSync: On */
394 case FB_BLANK_HSYNC_SUSPEND: 395 case FB_BLANK_HSYNC_SUSPEND:
395 drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY); 396 drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
396 break; 397 break;
398 /* Display: Off; HSync: On, VSync: Off */
397 case FB_BLANK_VSYNC_SUSPEND: 399 case FB_BLANK_VSYNC_SUSPEND:
398 drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND); 400 drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
399 break; 401 break;
402 /* Display: Off; HSync: Off, VSync: Off */
400 case FB_BLANK_POWERDOWN: 403 case FB_BLANK_POWERDOWN:
401 drm_fb_helper_off(info, DRM_MODE_DPMS_OFF); 404 drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
402 break; 405 break;
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
905 908
906 if (new_fb) { 909 if (new_fb) {
907 info->var.pixclock = 0; 910 info->var.pixclock = 0;
908 if (register_framebuffer(info) < 0) 911 ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
912 if (ret)
913 return ret;
914 if (register_framebuffer(info) < 0) {
915 fb_dealloc_cmap(&info->cmap);
909 return -EINVAL; 916 return -EINVAL;
917 }
910 } else { 918 } else {
911 drm_fb_helper_set_par(info); 919 drm_fb_helper_set_par(info);
912 } 920 }
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
936 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op); 944 unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
937 } 945 }
938 drm_fb_helper_crtc_free(helper); 946 drm_fb_helper_crtc_free(helper);
947 fb_dealloc_cmap(&helper->fb->fbdev->cmap);
939} 948}
940EXPORT_SYMBOL(drm_fb_helper_free); 949EXPORT_SYMBOL(drm_fb_helper_free);
941 950
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 251bc0e3b5ec..08d14df3bb42 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
257 257
258 INIT_LIST_HEAD(&priv->lhead); 258 INIT_LIST_HEAD(&priv->lhead);
259 INIT_LIST_HEAD(&priv->fbs); 259 INIT_LIST_HEAD(&priv->fbs);
260 INIT_LIST_HEAD(&priv->event_list);
261 init_waitqueue_head(&priv->event_wait);
262 priv->event_space = 4096; /* set aside 4k for event buffer */
260 263
261 if (dev->driver->driver_features & DRIVER_GEM) 264 if (dev->driver->driver_features & DRIVER_GEM)
262 drm_gem_open(dev, priv); 265 drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
297 goto out_free; 300 goto out_free;
298 } 301 }
299 } 302 }
303 mutex_lock(&dev->struct_mutex);
304 if (dev->driver->master_set) {
305 ret = dev->driver->master_set(dev, priv, true);
306 if (ret) {
307 /* drop both references if this fails */
308 drm_master_put(&priv->minor->master);
309 drm_master_put(&priv->master);
310 mutex_unlock(&dev->struct_mutex);
311 goto out_free;
312 }
313 }
314 mutex_unlock(&dev->struct_mutex);
300 } else { 315 } else {
301 /* get a reference to the master */ 316 /* get a reference to the master */
302 priv->master = drm_master_get(priv->minor->master); 317 priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
413 } 428 }
414} 429}
415 430
431static void drm_events_release(struct drm_file *file_priv)
432{
433 struct drm_device *dev = file_priv->minor->dev;
434 struct drm_pending_event *e, *et;
435 struct drm_pending_vblank_event *v, *vt;
436 unsigned long flags;
437
438 spin_lock_irqsave(&dev->event_lock, flags);
439
440 /* Remove pending flips */
441 list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
442 if (v->base.file_priv == file_priv) {
443 list_del(&v->base.link);
444 drm_vblank_put(dev, v->pipe);
445 v->base.destroy(&v->base);
446 }
447
448 /* Remove unconsumed events */
449 list_for_each_entry_safe(e, et, &file_priv->event_list, link)
450 e->destroy(e);
451
452 spin_unlock_irqrestore(&dev->event_lock, flags);
453}
454
416/** 455/**
417 * Release file. 456 * Release file.
418 * 457 *
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
451 if (file_priv->minor->master) 490 if (file_priv->minor->master)
452 drm_master_release(dev, filp); 491 drm_master_release(dev, filp);
453 492
493 drm_events_release(file_priv);
494
454 if (dev->driver->driver_features & DRIVER_GEM) 495 if (dev->driver->driver_features & DRIVER_GEM)
455 drm_gem_release(dev, file_priv); 496 drm_gem_release(dev, file_priv);
456 497
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
504 545
505 if (file_priv->minor->master == file_priv->master) { 546 if (file_priv->minor->master == file_priv->master) {
 506 /* drop the reference held by the minor */ 547 /* drop the reference held by the minor */
548 if (dev->driver->master_drop)
549 dev->driver->master_drop(dev, file_priv, true);
507 drm_master_put(&file_priv->minor->master); 550 drm_master_put(&file_priv->minor->master);
508 } 551 }
509 } 552 }
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
544} 587}
545EXPORT_SYMBOL(drm_release); 588EXPORT_SYMBOL(drm_release);
546 589
547/** No-op. */ 590static bool
591drm_dequeue_event(struct drm_file *file_priv,
592 size_t total, size_t max, struct drm_pending_event **out)
593{
594 struct drm_device *dev = file_priv->minor->dev;
595 struct drm_pending_event *e;
596 unsigned long flags;
597 bool ret = false;
598
599 spin_lock_irqsave(&dev->event_lock, flags);
600
601 *out = NULL;
602 if (list_empty(&file_priv->event_list))
603 goto out;
604 e = list_first_entry(&file_priv->event_list,
605 struct drm_pending_event, link);
606 if (e->event->length + total > max)
607 goto out;
608
609 file_priv->event_space += e->event->length;
610 list_del(&e->link);
611 *out = e;
612 ret = true;
613
614out:
615 spin_unlock_irqrestore(&dev->event_lock, flags);
616 return ret;
617}
618
619ssize_t drm_read(struct file *filp, char __user *buffer,
620 size_t count, loff_t *offset)
621{
622 struct drm_file *file_priv = filp->private_data;
623 struct drm_pending_event *e;
624 size_t total;
625 ssize_t ret;
626
627 ret = wait_event_interruptible(file_priv->event_wait,
628 !list_empty(&file_priv->event_list));
629 if (ret < 0)
630 return ret;
631
632 total = 0;
633 while (drm_dequeue_event(file_priv, total, count, &e)) {
634 if (copy_to_user(buffer + total,
635 e->event, e->event->length)) {
636 total = -EFAULT;
637 break;
638 }
639
640 total += e->event->length;
641 e->destroy(e);
642 }
643
644 return total;
645}
646EXPORT_SYMBOL(drm_read);
647
548unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait) 648unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
549{ 649{
550 return 0; 650 struct drm_file *file_priv = filp->private_data;
651 unsigned int mask = 0;
652
653 poll_wait(filp, &file_priv->event_wait, wait);
654
655 if (!list_empty(&file_priv->event_list))
656 mask |= POLLIN | POLLRDNORM;
657
658 return mask;
551} 659}
552EXPORT_SYMBOL(drm_poll); 660EXPORT_SYMBOL(drm_poll);
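
Consumer side of the new event path: poll() (or select()) says when the fd is readable, then read() returns as many whole events as fit in the buffer, packed back to back, each led by a struct drm_event header. An illustrative loop:

    char buf[4096];
    ssize_t i = 0, len = read(fd, buf, sizeof(buf));

    while (i < len) {
        struct drm_event *e = (struct drm_event *)&buf[i];

        if (e->type == DRM_EVENT_VBLANK) {
            struct drm_event_vblank *vb = (struct drm_event_vblank *)e;
            /* vb->sequence, vb->tv_sec/tv_usec, vb->user_data */
        }
        i += e->length; /* length covers header plus payload */
    }
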
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0a6f0b3bdc78..7998ee66b317 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
429 429
430 spin_lock_irqsave(&dev->vbl_lock, irqflags); 430 spin_lock_irqsave(&dev->vbl_lock, irqflags);
431 /* Going from 0->1 means we have to enable interrupts again */ 431 /* Going from 0->1 means we have to enable interrupts again */
432 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && 432 if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
433 !dev->vblank_enabled[crtc]) { 433 if (!dev->vblank_enabled[crtc]) {
434 ret = dev->driver->enable_vblank(dev, crtc); 434 ret = dev->driver->enable_vblank(dev, crtc);
435 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); 435 DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
436 if (ret) 436 if (ret)
437 atomic_dec(&dev->vblank_refcount[crtc]);
438 else {
439 dev->vblank_enabled[crtc] = 1;
440 drm_update_vblank_count(dev, crtc);
441 }
442 }
443 } else {
444 if (!dev->vblank_enabled[crtc]) {
437 atomic_dec(&dev->vblank_refcount[crtc]); 445 atomic_dec(&dev->vblank_refcount[crtc]);
438 else { 446 ret = -EINVAL;
439 dev->vblank_enabled[crtc] = 1;
440 drm_update_vblank_count(dev, crtc);
441 } 447 }
442 } 448 }
443 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 449 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
464} 470}
465EXPORT_SYMBOL(drm_vblank_put); 471EXPORT_SYMBOL(drm_vblank_put);
466 472
473void drm_vblank_off(struct drm_device *dev, int crtc)
474{
475 unsigned long irqflags;
476
477 spin_lock_irqsave(&dev->vbl_lock, irqflags);
478 DRM_WAKEUP(&dev->vbl_queue[crtc]);
479 dev->vblank_enabled[crtc] = 0;
480 dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
481 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
482}
483EXPORT_SYMBOL(drm_vblank_off);
484
467/** 485/**
468 * drm_vblank_pre_modeset - account for vblanks across mode sets 486 * drm_vblank_pre_modeset - account for vblanks across mode sets
469 * @dev: DRM device 487 * @dev: DRM device
@@ -550,6 +568,63 @@ out:
550 return ret; 568 return ret;
551} 569}
552 570
571static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
572 union drm_wait_vblank *vblwait,
573 struct drm_file *file_priv)
574{
575 struct drm_pending_vblank_event *e;
576 struct timeval now;
577 unsigned long flags;
578 unsigned int seq;
579
580 e = kzalloc(sizeof *e, GFP_KERNEL);
581 if (e == NULL)
582 return -ENOMEM;
583
584 e->pipe = pipe;
585 e->event.base.type = DRM_EVENT_VBLANK;
586 e->event.base.length = sizeof e->event;
587 e->event.user_data = vblwait->request.signal;
588 e->base.event = &e->event.base;
589 e->base.file_priv = file_priv;
590 e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
591
592 do_gettimeofday(&now);
593 spin_lock_irqsave(&dev->event_lock, flags);
594
595 if (file_priv->event_space < sizeof e->event) {
596 spin_unlock_irqrestore(&dev->event_lock, flags);
597 kfree(e);
598 return -ENOMEM;
599 }
600
601 file_priv->event_space -= sizeof e->event;
602 seq = drm_vblank_count(dev, pipe);
603 if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
604 (seq - vblwait->request.sequence) <= (1 << 23)) {
605 vblwait->request.sequence = seq + 1;
606 vblwait->reply.sequence = vblwait->request.sequence;
607 }
608
609 DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
610 vblwait->request.sequence, seq, pipe);
611
612 e->event.sequence = vblwait->request.sequence;
613 if ((seq - vblwait->request.sequence) <= (1 << 23)) {
614 e->event.tv_sec = now.tv_sec;
615 e->event.tv_usec = now.tv_usec;
616 drm_vblank_put(dev, e->pipe);
617 list_add_tail(&e->base.link, &e->base.file_priv->event_list);
618 wake_up_interruptible(&e->base.file_priv->event_wait);
619 } else {
620 list_add_tail(&e->base.link, &dev->vblank_event_list);
621 }
622
623 spin_unlock_irqrestore(&dev->event_lock, flags);
624
625 return 0;
626}
627
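
User space requests one of these events with the new _DRM_VBLANK_EVENT flag; the ioctl then returns immediately and the completion arrives on the fd like any other event. A sketch:

    union drm_wait_vblank vbl = { 0 };

    vbl.request.type     = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
    vbl.request.sequence = 1;          /* the next vblank */
    vbl.request.signal   = 0x12345678; /* comes back as event user_data */

    drmIoctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl); /* non-blocking with _EVENT set */
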
553/** 628/**
554 * Wait for VBLANK. 629 * Wait for VBLANK.
555 * 630 *
@@ -609,6 +684,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
609 goto done; 684 goto done;
610 } 685 }
611 686
687 if (flags & _DRM_VBLANK_EVENT)
688 return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
689
612 if ((flags & _DRM_VBLANK_NEXTONMISS) && 690 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
613 (seq - vblwait->request.sequence) <= (1<<23)) { 691 (seq - vblwait->request.sequence) <= (1<<23)) {
614 vblwait->request.sequence = seq + 1; 692 vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@ done:
641 return ret; 719 return ret;
642} 720}
643 721
722void drm_handle_vblank_events(struct drm_device *dev, int crtc)
723{
724 struct drm_pending_vblank_event *e, *t;
725 struct timeval now;
726 unsigned long flags;
727 unsigned int seq;
728
729 do_gettimeofday(&now);
730 seq = drm_vblank_count(dev, crtc);
731
732 spin_lock_irqsave(&dev->event_lock, flags);
733
734 list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
735 if (e->pipe != crtc)
736 continue;
737 if ((seq - e->event.sequence) > (1<<23))
738 continue;
739
740 DRM_DEBUG("vblank event on %d, current %d\n",
741 e->event.sequence, seq);
742
743 e->event.sequence = seq;
744 e->event.tv_sec = now.tv_sec;
745 e->event.tv_usec = now.tv_usec;
746 drm_vblank_put(dev, e->pipe);
747 list_move_tail(&e->base.link, &e->base.file_priv->event_list);
748 wake_up_interruptible(&e->base.file_priv->event_wait);
749 }
750
751 spin_unlock_irqrestore(&dev->event_lock, flags);
752}
753
644/** 754/**
645 * drm_handle_vblank - handle a vblank event 755 * drm_handle_vblank - handle a vblank event
646 * @dev: DRM device 756 * @dev: DRM device
@@ -651,7 +761,11 @@ done:
651 */ 761 */
652void drm_handle_vblank(struct drm_device *dev, int crtc) 762void drm_handle_vblank(struct drm_device *dev, int crtc)
653{ 763{
764 if (!dev->num_crtcs)
765 return;
766
654 atomic_inc(&dev->_vblank_count[crtc]); 767 atomic_inc(&dev->_vblank_count[crtc]);
655 DRM_WAKEUP(&dev->vbl_queue[crtc]); 768 DRM_WAKEUP(&dev->vbl_queue[crtc]);
769 drm_handle_vblank_events(dev, crtc);
656} 770}
657EXPORT_SYMBOL(drm_handle_vblank); 771EXPORT_SYMBOL(drm_handle_vblank);
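
The drm_irq.c hunks above add an asynchronous vblank path: with _DRM_VBLANK_EVENT set, drm_wait_vblank() queues a drm_pending_vblank_event instead of sleeping, and drm_handle_vblank_events() later completes it from the interrupt handler by moving it onto the file's event list. A minimal userspace sketch of how a client might drive this, assuming the drm_event_vblank UAPI that accompanies this series and a DRM fd on /dev/dri/card0, with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

int main(void)
{
	union drm_wait_vblank vbl;
	struct drm_event_vblank ev;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
	vbl.request.sequence = 1;		/* the next vblank */
	vbl.request.signal = 0xdeadbeef;	/* echoed back as user_data */

	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl))
		return 1;

	/* The ioctl returns immediately; the event is read from the fd. */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev) &&
	    ev.base.type == DRM_EVENT_VBLANK)
		printf("vblank %u at %u.%06u, user_data 0x%llx\n",
		       ev.sequence, ev.tv_sec, ev.tv_usec,
		       (unsigned long long)ev.user_data);

	close(fd);
	return 0;
}
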
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 97dc5a4f0de4..d7d7eac3ddd2 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
226} 226}
227EXPORT_SYMBOL(drm_mm_get_block_generic); 227EXPORT_SYMBOL(drm_mm_get_block_generic);
228 228
229struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
230 unsigned long size,
231 unsigned alignment,
232 unsigned long start,
233 unsigned long end,
234 int atomic)
235{
236 struct drm_mm_node *align_splitoff = NULL;
237 unsigned tmp = 0;
238 unsigned wasted = 0;
239
240 if (node->start < start)
241 wasted += start - node->start;
242 if (alignment)
243 tmp = ((node->start + wasted) % alignment);
244
245 if (tmp)
246 wasted += alignment - tmp;
247 if (wasted) {
248 align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
249 if (unlikely(align_splitoff == NULL))
250 return NULL;
251 }
252
253 if (node->size == size) {
254 list_del_init(&node->fl_entry);
255 node->free = 0;
256 } else {
257 node = drm_mm_split_at_start(node, size, atomic);
258 }
259
260 if (align_splitoff)
261 drm_mm_put_block(align_splitoff);
262
263 return node;
264}
265EXPORT_SYMBOL(drm_mm_get_block_range_generic);
266
229/* 267/*
230 * Put a block. Merge with the previous and / or next block if they are free. 268 * Put a block. Merge with the previous and / or next block if they are free.
231 * Otherwise add to the free stack. 269 * Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
331} 369}
332EXPORT_SYMBOL(drm_mm_search_free); 370EXPORT_SYMBOL(drm_mm_search_free);
333 371
372struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
373 unsigned long size,
374 unsigned alignment,
375 unsigned long start,
376 unsigned long end,
377 int best_match)
378{
379 struct list_head *list;
380 const struct list_head *free_stack = &mm->fl_entry;
381 struct drm_mm_node *entry;
382 struct drm_mm_node *best;
383 unsigned long best_size;
384 unsigned wasted;
385
386 best = NULL;
387 best_size = ~0UL;
388
389 list_for_each(list, free_stack) {
390 entry = list_entry(list, struct drm_mm_node, fl_entry);
391 wasted = 0;
392
393 if (entry->size < size)
394 continue;
395
396 if (entry->start > end || (entry->start+entry->size) < start)
397 continue;
398
399 if (entry->start < start)
400 wasted += start - entry->start;
401
402 if (alignment) {
 403 unsigned tmp = (entry->start + wasted) % alignment;
404 if (tmp)
405 wasted += alignment - tmp;
406 }
407
408 if (entry->size >= size + wasted) {
409 if (!best_match)
410 return entry;
 411 if (entry->size < best_size) {
412 best = entry;
413 best_size = entry->size;
414 }
415 }
416 }
417
418 return best;
419}
420EXPORT_SYMBOL(drm_mm_search_free_in_range);
421
334int drm_mm_clean(struct drm_mm * mm) 422int drm_mm_clean(struct drm_mm * mm)
335{ 423{
336 struct list_head *head = &mm->ml_entry; 424 struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
381} 469}
382EXPORT_SYMBOL(drm_mm_takedown); 470EXPORT_SYMBOL(drm_mm_takedown);
383 471
472void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
473{
474 struct drm_mm_node *entry;
475 int total_used = 0, total_free = 0, total = 0;
476
477 list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
478 printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
479 prefix, entry->start, entry->start + entry->size,
480 entry->size, entry->free ? "free" : "used");
481 total += entry->size;
482 if (entry->free)
483 total_free += entry->size;
484 else
485 total_used += entry->size;
486 }
487 printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
488 total_used, total_free);
489}
490EXPORT_SYMBOL(drm_mm_debug_table);
491
384#if defined(CONFIG_DEBUG_FS) 492#if defined(CONFIG_DEBUG_FS)
385int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 493int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
386{ 494{
@@ -395,7 +503,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
395 else 503 else
396 total_used += entry->size; 504 total_used += entry->size;
397 } 505 }
398 seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used); 506 seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
399 return 0; 507 return 0;
400} 508}
401EXPORT_SYMBOL(drm_mm_dump_table); 509EXPORT_SYMBOL(drm_mm_dump_table);
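
The pair of functions added above lets a driver confine an allocation to a window of the manager's address space, e.g. for hardware that can only access a limited aperture. A hedged sketch of the intended pairing; alloc_in_range() is a hypothetical helper name, not part of the API:

static struct drm_mm_node *alloc_in_range(struct drm_mm *mm,
					  unsigned long size,
					  unsigned alignment,
					  unsigned long start,
					  unsigned long end)
{
	struct drm_mm_node *node;

	/* find a free hole overlapping [start, end) ... */
	node = drm_mm_search_free_in_range(mm, size, alignment,
					   start, end, 1 /* best match */);
	if (!node)
		return NULL;

	/* ... then carve the aligned block out of it; may still fail if
	 * the alignment split-off cannot be allocated */
	return drm_mm_get_block_range_generic(node, size, alignment,
					      start, end, 0 /* !atomic */);
}
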
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 51f677215f1d..6d81a02463a3 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
553} 553}
554EXPORT_SYMBOL(drm_mode_height); 554EXPORT_SYMBOL(drm_mode_height);
555 555
556/** drm_mode_hsync - get the hsync of a mode
557 * @mode: mode
558 *
559 * LOCKING:
560 * None.
561 *
 562 * Return @mode's hsync rate in kHz, rounded to the nearest integer.
563 */
564int drm_mode_hsync(struct drm_display_mode *mode)
565{
566 unsigned int calc_val;
567
568 if (mode->hsync)
569 return mode->hsync;
570
 571 if (mode->htotal <= 0)
 572 return 0;
 573
 574 calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
 575 calc_val += 500; /* bias for rounding to the nearest kHz */
 576 calc_val /= 1000; /* truncate to kHz */
577
578 return calc_val;
579}
580EXPORT_SYMBOL(drm_mode_hsync);
581
556/** 582/**
557 * drm_mode_vrefresh - get the vrefresh of a mode 583 * drm_mode_vrefresh - get the vrefresh of a mode
558 * @mode: mode 584 * @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
560 * LOCKING: 586 * LOCKING:
561 * None. 587 * None.
562 * 588 *
563 * Return @mode's vrefresh rate or calculate it if necessary. 589 * Return @mode's vrefresh rate in Hz or calculate it if necessary.
564 * 590 *
565 * FIXME: why is this needed? shouldn't vrefresh be set already? 591 * FIXME: why is this needed? shouldn't vrefresh be set already?
566 * 592 *
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 55bb8a82d612..ad73e141afdb 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
128 kref_get(&master->refcount); 128 kref_get(&master->refcount);
129 return master; 129 return master;
130} 130}
131EXPORT_SYMBOL(drm_master_get);
131 132
132static void drm_master_destroy(struct kref *kref) 133static void drm_master_destroy(struct kref *kref)
133{ 134{
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
170 kref_put(&(*master)->refcount, drm_master_destroy); 171 kref_put(&(*master)->refcount, drm_master_destroy);
171 *master = NULL; 172 *master = NULL;
172} 173}
174EXPORT_SYMBOL(drm_master_put);
173 175
174int drm_setmaster_ioctl(struct drm_device *dev, void *data, 176int drm_setmaster_ioctl(struct drm_device *dev, void *data,
175 struct drm_file *file_priv) 177 struct drm_file *file_priv)
176{ 178{
179 int ret = 0;
180
177 if (file_priv->is_master) 181 if (file_priv->is_master)
178 return 0; 182 return 0;
179 183
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
188 mutex_lock(&dev->struct_mutex); 192 mutex_lock(&dev->struct_mutex);
189 file_priv->minor->master = drm_master_get(file_priv->master); 193 file_priv->minor->master = drm_master_get(file_priv->master);
190 file_priv->is_master = 1; 194 file_priv->is_master = 1;
195 if (dev->driver->master_set) {
196 ret = dev->driver->master_set(dev, file_priv, false);
197 if (unlikely(ret != 0)) {
198 file_priv->is_master = 0;
199 drm_master_put(&file_priv->minor->master);
200 }
201 }
191 mutex_unlock(&dev->struct_mutex); 202 mutex_unlock(&dev->struct_mutex);
192 } 203 }
193 204
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
204 return -EINVAL; 215 return -EINVAL;
205 216
206 mutex_lock(&dev->struct_mutex); 217 mutex_lock(&dev->struct_mutex);
218 if (dev->driver->master_drop)
219 dev->driver->master_drop(dev, file_priv, false);
207 drm_master_put(&file_priv->minor->master); 220 drm_master_put(&file_priv->minor->master);
208 file_priv->is_master = 0; 221 file_priv->is_master = 0;
209 mutex_unlock(&dev->struct_mutex); 222 mutex_unlock(&dev->struct_mutex);
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
220 INIT_LIST_HEAD(&dev->ctxlist); 233 INIT_LIST_HEAD(&dev->ctxlist);
221 INIT_LIST_HEAD(&dev->vmalist); 234 INIT_LIST_HEAD(&dev->vmalist);
222 INIT_LIST_HEAD(&dev->maplist); 235 INIT_LIST_HEAD(&dev->maplist);
236 INIT_LIST_HEAD(&dev->vblank_event_list);
223 237
224 spin_lock_init(&dev->count_lock); 238 spin_lock_init(&dev->count_lock);
225 spin_lock_init(&dev->drw_lock); 239 spin_lock_init(&dev->drw_lock);
240 spin_lock_init(&dev->event_lock);
226 init_timer(&dev->timer); 241 init_timer(&dev->timer);
227 mutex_init(&dev->struct_mutex); 242 mutex_init(&dev->struct_mutex);
228 mutex_init(&dev->ctxlist_mutex); 243 mutex_init(&dev->ctxlist_mutex);
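
The new master_set/master_drop hooks give a driver a say when a client gains or loses DRM master status; master_set may veto by returning non-zero, in which case drm_setmaster_ioctl() rolls the assignment back. A hedged sketch of a driver wiring them up; the foo_ names are placeholders:

static int foo_master_set(struct drm_device *dev,
			  struct drm_file *file_priv, bool from_open)
{
	/* claim whatever per-master hardware state is needed; a
	 * non-zero return undoes the master assignment above */
	return 0;
}

static void foo_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv, bool from_release)
{
	/* release what foo_master_set() claimed */
}

static struct drm_driver foo_driver = {
	/* ... other hooks ... */
	.master_set = foo_master_set,
	.master_drop = foo_master_drop,
};
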
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
new file mode 100644
index 000000000000..6d2abaf35ba2
--- /dev/null
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -0,0 +1,4 @@
1ccflags-y := -Iinclude/drm
2
3ch7006-y := ch7006_drv.o ch7006_mode.o
4obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
new file mode 100644
index 000000000000..9422a74c8b54
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -0,0 +1,531 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "ch7006_priv.h"
28
29/* DRM encoder functions */
30
31static void ch7006_encoder_set_config(struct drm_encoder *encoder,
32 void *params)
33{
34 struct ch7006_priv *priv = to_ch7006_priv(encoder);
35
36 priv->params = params;
37}
38
39static void ch7006_encoder_destroy(struct drm_encoder *encoder)
40{
41 struct ch7006_priv *priv = to_ch7006_priv(encoder);
42
43 drm_property_destroy(encoder->dev, priv->scale_property);
44
45 kfree(priv);
46 to_encoder_slave(encoder)->slave_priv = NULL;
47
48 drm_i2c_encoder_destroy(encoder);
49}
50
51static void ch7006_encoder_dpms(struct drm_encoder *encoder, int mode)
52{
53 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
54 struct ch7006_priv *priv = to_ch7006_priv(encoder);
55 struct ch7006_state *state = &priv->state;
56
57 ch7006_dbg(client, "\n");
58
59 if (mode == priv->last_dpms)
60 return;
61 priv->last_dpms = mode;
62
63 ch7006_setup_power_state(encoder);
64
65 ch7006_load_reg(client, state, CH7006_POWER);
66}
67
68static void ch7006_encoder_save(struct drm_encoder *encoder)
69{
70 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
71 struct ch7006_priv *priv = to_ch7006_priv(encoder);
72
73 ch7006_dbg(client, "\n");
74
75 ch7006_state_save(client, &priv->saved_state);
76}
77
78static void ch7006_encoder_restore(struct drm_encoder *encoder)
79{
80 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
81 struct ch7006_priv *priv = to_ch7006_priv(encoder);
82
83 ch7006_dbg(client, "\n");
84
85 ch7006_state_load(client, &priv->saved_state);
86}
87
88static bool ch7006_encoder_mode_fixup(struct drm_encoder *encoder,
89 struct drm_display_mode *mode,
90 struct drm_display_mode *adjusted_mode)
91{
92 struct ch7006_priv *priv = to_ch7006_priv(encoder);
93
94 /* The ch7006 is painfully picky with the input timings so no
95 * custom modes for now... */
96
97 priv->mode = ch7006_lookup_mode(encoder, mode);
98
99 return !!priv->mode;
100}
101
102static int ch7006_encoder_mode_valid(struct drm_encoder *encoder,
103 struct drm_display_mode *mode)
104{
105 if (ch7006_lookup_mode(encoder, mode))
106 return MODE_OK;
107 else
108 return MODE_BAD;
109}
110
111static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
112 struct drm_display_mode *drm_mode,
113 struct drm_display_mode *adjusted_mode)
114{
115 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
116 struct ch7006_priv *priv = to_ch7006_priv(encoder);
117 struct ch7006_encoder_params *params = priv->params;
118 struct ch7006_state *state = &priv->state;
119 uint8_t *regs = state->regs;
120 struct ch7006_mode *mode = priv->mode;
121 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
122 int start_active;
123
124 ch7006_dbg(client, "\n");
125
126 regs[CH7006_DISPMODE] = norm->dispmode | mode->dispmode;
127 regs[CH7006_BWIDTH] = 0;
128 regs[CH7006_INPUT_FORMAT] = bitf(CH7006_INPUT_FORMAT_FORMAT,
129 params->input_format);
130
131 regs[CH7006_CLKMODE] = CH7006_CLKMODE_SUBC_LOCK
132 | bitf(CH7006_CLKMODE_XCM, params->xcm)
133 | bitf(CH7006_CLKMODE_PCM, params->pcm);
134 if (params->clock_mode)
135 regs[CH7006_CLKMODE] |= CH7006_CLKMODE_MASTER;
136 if (params->clock_edge)
137 regs[CH7006_CLKMODE] |= CH7006_CLKMODE_POS_EDGE;
138
139 start_active = (drm_mode->htotal & ~0x7) - (drm_mode->hsync_start & ~0x7);
140 regs[CH7006_POV] = bitf(CH7006_POV_START_ACTIVE_8, start_active);
141 regs[CH7006_START_ACTIVE] = bitf(CH7006_START_ACTIVE_0, start_active);
142
143 regs[CH7006_INPUT_SYNC] = 0;
144 if (params->sync_direction)
145 regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_OUTPUT;
146 if (params->sync_encoding)
147 regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_EMBEDDED;
148 if (drm_mode->flags & DRM_MODE_FLAG_PVSYNC)
149 regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PVSYNC;
150 if (drm_mode->flags & DRM_MODE_FLAG_PHSYNC)
151 regs[CH7006_INPUT_SYNC] |= CH7006_INPUT_SYNC_PHSYNC;
152
153 regs[CH7006_DETECT] = 0;
154 regs[CH7006_BCLKOUT] = 0;
155
156 regs[CH7006_SUBC_INC3] = 0;
157 if (params->pout_level)
158 regs[CH7006_SUBC_INC3] |= CH7006_SUBC_INC3_POUT_3_3V;
159
160 regs[CH7006_SUBC_INC4] = 0;
161 if (params->active_detect)
162 regs[CH7006_SUBC_INC4] |= CH7006_SUBC_INC4_DS_INPUT;
163
164 regs[CH7006_PLL_CONTROL] = priv->saved_state.regs[CH7006_PLL_CONTROL];
165
166 ch7006_setup_levels(encoder);
167 ch7006_setup_subcarrier(encoder);
168 ch7006_setup_pll(encoder);
169 ch7006_setup_power_state(encoder);
170 ch7006_setup_properties(encoder);
171
172 ch7006_state_load(client, state);
173}
174
175static enum drm_connector_status ch7006_encoder_detect(struct drm_encoder *encoder,
176 struct drm_connector *connector)
177{
178 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
179 struct ch7006_priv *priv = to_ch7006_priv(encoder);
180 struct ch7006_state *state = &priv->state;
181 int det;
182
183 ch7006_dbg(client, "\n");
184
185 ch7006_save_reg(client, state, CH7006_DETECT);
186 ch7006_save_reg(client, state, CH7006_POWER);
187 ch7006_save_reg(client, state, CH7006_CLKMODE);
188
189 ch7006_write(client, CH7006_POWER, CH7006_POWER_RESET |
190 bitfs(CH7006_POWER_LEVEL, NORMAL));
191 ch7006_write(client, CH7006_CLKMODE, CH7006_CLKMODE_MASTER);
192
193 ch7006_write(client, CH7006_DETECT, CH7006_DETECT_SENSE);
194
195 ch7006_write(client, CH7006_DETECT, 0);
196
197 det = ch7006_read(client, CH7006_DETECT);
198
199 ch7006_load_reg(client, state, CH7006_CLKMODE);
200 ch7006_load_reg(client, state, CH7006_POWER);
201 ch7006_load_reg(client, state, CH7006_DETECT);
202
203 if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
204 CH7006_DETECT_SVIDEO_C_TEST|
205 CH7006_DETECT_CVBS_TEST)) == 0)
206 priv->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
207 else if ((det & (CH7006_DETECT_SVIDEO_Y_TEST|
208 CH7006_DETECT_SVIDEO_C_TEST)) == 0)
209 priv->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
210 else if ((det & CH7006_DETECT_CVBS_TEST) == 0)
211 priv->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
212 else
213 priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
214
215 drm_connector_property_set_value(connector,
216 encoder->dev->mode_config.tv_subconnector_property,
217 priv->subconnector);
218
219 return priv->subconnector ? connector_status_connected :
220 connector_status_disconnected;
221}
222
223static int ch7006_encoder_get_modes(struct drm_encoder *encoder,
224 struct drm_connector *connector)
225{
226 struct ch7006_priv *priv = to_ch7006_priv(encoder);
227 struct ch7006_mode *mode;
228 int n = 0;
229
230 for (mode = ch7006_modes; mode->mode.clock; mode++) {
231 if (~mode->valid_scales & 1<<priv->scale ||
232 ~mode->valid_norms & 1<<priv->norm)
233 continue;
234
235 drm_mode_probed_add(connector,
236 drm_mode_duplicate(encoder->dev, &mode->mode));
237
238 n++;
239 }
240
241 return n;
242}
243
244static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
245 struct drm_connector *connector)
246{
247 struct ch7006_priv *priv = to_ch7006_priv(encoder);
248 struct drm_device *dev = encoder->dev;
249 struct drm_mode_config *conf = &dev->mode_config;
250
251 drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
252
253 priv->scale_property = drm_property_create(dev, DRM_MODE_PROP_RANGE,
254 "scale", 2);
255 priv->scale_property->values[0] = 0;
256 priv->scale_property->values[1] = 2;
257
258 drm_connector_attach_property(connector, conf->tv_select_subconnector_property,
259 priv->select_subconnector);
260 drm_connector_attach_property(connector, conf->tv_subconnector_property,
261 priv->subconnector);
262 drm_connector_attach_property(connector, conf->tv_left_margin_property,
263 priv->hmargin);
264 drm_connector_attach_property(connector, conf->tv_bottom_margin_property,
265 priv->vmargin);
266 drm_connector_attach_property(connector, conf->tv_mode_property,
267 priv->norm);
268 drm_connector_attach_property(connector, conf->tv_brightness_property,
269 priv->brightness);
270 drm_connector_attach_property(connector, conf->tv_contrast_property,
271 priv->contrast);
272 drm_connector_attach_property(connector, conf->tv_flicker_reduction_property,
273 priv->flicker);
274 drm_connector_attach_property(connector, priv->scale_property,
275 priv->scale);
276
277 return 0;
278}
279
280static int ch7006_encoder_set_property(struct drm_encoder *encoder,
281 struct drm_connector *connector,
282 struct drm_property *property,
283 uint64_t val)
284{
285 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
286 struct ch7006_priv *priv = to_ch7006_priv(encoder);
287 struct ch7006_state *state = &priv->state;
288 struct drm_mode_config *conf = &encoder->dev->mode_config;
289 struct drm_crtc *crtc = encoder->crtc;
290 bool modes_changed = false;
291
292 ch7006_dbg(client, "\n");
293
294 if (property == conf->tv_select_subconnector_property) {
295 priv->select_subconnector = val;
296
297 ch7006_setup_power_state(encoder);
298
299 ch7006_load_reg(client, state, CH7006_POWER);
300
301 } else if (property == conf->tv_left_margin_property) {
302 priv->hmargin = val;
303
304 ch7006_setup_properties(encoder);
305
306 ch7006_load_reg(client, state, CH7006_POV);
307 ch7006_load_reg(client, state, CH7006_HPOS);
308
309 } else if (property == conf->tv_bottom_margin_property) {
310 priv->vmargin = val;
311
312 ch7006_setup_properties(encoder);
313
314 ch7006_load_reg(client, state, CH7006_POV);
315 ch7006_load_reg(client, state, CH7006_VPOS);
316
317 } else if (property == conf->tv_mode_property) {
318 if (connector->dpms != DRM_MODE_DPMS_OFF)
319 return -EINVAL;
320
321 priv->norm = val;
322
323 modes_changed = true;
324
325 } else if (property == conf->tv_brightness_property) {
326 priv->brightness = val;
327
328 ch7006_setup_levels(encoder);
329
330 ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
331
332 } else if (property == conf->tv_contrast_property) {
333 priv->contrast = val;
334
335 ch7006_setup_properties(encoder);
336
337 ch7006_load_reg(client, state, CH7006_CONTRAST);
338
339 } else if (property == conf->tv_flicker_reduction_property) {
340 priv->flicker = val;
341
342 ch7006_setup_properties(encoder);
343
344 ch7006_load_reg(client, state, CH7006_FFILTER);
345
346 } else if (property == priv->scale_property) {
347 if (connector->dpms != DRM_MODE_DPMS_OFF)
348 return -EINVAL;
349
350 priv->scale = val;
351
352 modes_changed = true;
353
354 } else {
355 return -EINVAL;
356 }
357
358 if (modes_changed) {
359 drm_helper_probe_single_connector_modes(connector, 0, 0);
360
361 /* Disable the crtc to ensure a full modeset is
362 * performed whenever it's turned on again. */
363 if (crtc) {
364 struct drm_mode_set modeset = {
365 .crtc = crtc,
366 };
367
368 crtc->funcs->set_config(&modeset);
369 }
370 }
371
372 return 0;
373}
374
375static struct drm_encoder_slave_funcs ch7006_encoder_funcs = {
376 .set_config = ch7006_encoder_set_config,
377 .destroy = ch7006_encoder_destroy,
378 .dpms = ch7006_encoder_dpms,
379 .save = ch7006_encoder_save,
380 .restore = ch7006_encoder_restore,
381 .mode_fixup = ch7006_encoder_mode_fixup,
382 .mode_valid = ch7006_encoder_mode_valid,
383 .mode_set = ch7006_encoder_mode_set,
384 .detect = ch7006_encoder_detect,
385 .get_modes = ch7006_encoder_get_modes,
386 .create_resources = ch7006_encoder_create_resources,
387 .set_property = ch7006_encoder_set_property,
388};
389
390
391/* I2C driver functions */
392
393static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *id)
394{
395 uint8_t addr = CH7006_VERSION_ID;
396 uint8_t val;
397 int ret;
398
399 ch7006_dbg(client, "\n");
400
401 ret = i2c_master_send(client, &addr, sizeof(addr));
402 if (ret < 0)
403 goto fail;
404
405 ret = i2c_master_recv(client, &val, sizeof(val));
406 if (ret < 0)
407 goto fail;
408
409 ch7006_info(client, "Detected version ID: %x\n", val);
410
411 return 0;
412
413fail:
414 ch7006_err(client, "Error %d reading version ID\n", ret);
415
416 return -ENODEV;
417}
418
419static int ch7006_remove(struct i2c_client *client)
420{
421 ch7006_dbg(client, "\n");
422
423 return 0;
424}
425
426static int ch7006_encoder_init(struct i2c_client *client,
427 struct drm_device *dev,
428 struct drm_encoder_slave *encoder)
429{
430 struct ch7006_priv *priv;
431 int i;
432
433 ch7006_dbg(client, "\n");
434
435 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
436 if (!priv)
437 return -ENOMEM;
438
439 encoder->slave_priv = priv;
440 encoder->slave_funcs = &ch7006_encoder_funcs;
441
442 priv->norm = TV_NORM_PAL;
443 priv->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
444 priv->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
445 priv->scale = 1;
446 priv->contrast = 50;
447 priv->brightness = 50;
448 priv->flicker = 50;
449 priv->hmargin = 50;
450 priv->vmargin = 50;
451 priv->last_dpms = -1;
452
453 if (ch7006_tv_norm) {
454 for (i = 0; i < NUM_TV_NORMS; i++) {
455 if (!strcmp(ch7006_tv_norm_names[i], ch7006_tv_norm)) {
456 priv->norm = i;
457 break;
458 }
459 }
460
461 if (i == NUM_TV_NORMS)
462 ch7006_err(client, "Invalid TV norm setting \"%s\".\n",
463 ch7006_tv_norm);
464 }
465
466 if (ch7006_scale >= 0 && ch7006_scale <= 2)
467 priv->scale = ch7006_scale;
468 else
469 ch7006_err(client, "Invalid scale setting \"%d\".\n",
470 ch7006_scale);
471
472 return 0;
473}
474
475static struct i2c_device_id ch7006_ids[] = {
476 { "ch7006", 0 },
477 { }
478};
479MODULE_DEVICE_TABLE(i2c, ch7006_ids);
480
481static struct drm_i2c_encoder_driver ch7006_driver = {
482 .i2c_driver = {
483 .probe = ch7006_probe,
484 .remove = ch7006_remove,
485
486 .driver = {
487 .name = "ch7006",
488 },
489
490 .id_table = ch7006_ids,
491 },
492
493 .encoder_init = ch7006_encoder_init,
494};
495
496
497/* Module initialization */
498
499static int __init ch7006_init(void)
500{
501 return drm_i2c_encoder_register(THIS_MODULE, &ch7006_driver);
502}
503
504static void __exit ch7006_exit(void)
505{
506 drm_i2c_encoder_unregister(&ch7006_driver);
507}
508
509int ch7006_debug;
510module_param_named(debug, ch7006_debug, int, 0600);
511MODULE_PARM_DESC(debug, "Enable debug output.");
512
513char *ch7006_tv_norm;
514module_param_named(tv_norm, ch7006_tv_norm, charp, 0600);
515MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
516 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, PAL-60, NTSC-M, NTSC-J.\n"
517 "\t\tDefault: PAL");
518
519int ch7006_scale = 1;
520module_param_named(scale, ch7006_scale, int, 0600);
521MODULE_PARM_DESC(scale, "Default scale.\n"
522 "\t\tSupported: 0 -> Select video modes with a higher blanking ratio.\n"
523 "\t\t\t1 -> Select default video modes.\n"
524 "\t\t\t2 -> Select video modes with a lower blanking ratio.");
525
526MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
527MODULE_DESCRIPTION("Chrontel ch7006 TV encoder driver");
528MODULE_LICENSE("GPL and additional rights");
529
530module_init(ch7006_init);
531module_exit(ch7006_exit);
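
ch7006_setup_pll() above brute-forces the divider pair (n, m) whose output CH7006_FREQ0*(n+2)/(m+2) lands closest to the target pixel clock. A standalone userspace re-implementation of the same search, handy for sanity-checking which clocks the part can reach (constants copied from ch7006_priv.h; values in kHz):

#include <stdio.h>
#include <stdlib.h>

#define CH7006_FREQ0	14318
#define CH7006_MAXN	650
#define CH7006_MAXM	315

int main(void)
{
	int target = 24671;	/* kHz; the 640x480 NTSC-like mode above */
	int n, m, freq, best_n = 0, best_m = 0, best_freq = 0;

	for (n = 0; n < CH7006_MAXN; n++)
		for (m = 0; m < CH7006_MAXM; m++) {
			freq = CH7006_FREQ0 * (n + 2) / (m + 2);
			if (abs(freq - target) < abs(best_freq - target)) {
				best_freq = freq;
				best_n = n;
				best_m = m;
			}
		}

	printf("n=%d m=%d -> %d kHz\n", best_n, best_m, best_freq);
	return 0;
}
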
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
new file mode 100644
index 000000000000..87f5445092e8
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -0,0 +1,473 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "ch7006_priv.h"
28
29char *ch7006_tv_norm_names[] = {
30 [TV_NORM_PAL] = "PAL",
31 [TV_NORM_PAL_M] = "PAL-M",
32 [TV_NORM_PAL_N] = "PAL-N",
33 [TV_NORM_PAL_NC] = "PAL-Nc",
34 [TV_NORM_PAL_60] = "PAL-60",
35 [TV_NORM_NTSC_M] = "NTSC-M",
36 [TV_NORM_NTSC_J] = "NTSC-J",
37};
38
39#define NTSC_LIKE_TIMINGS .vrefresh = 60 * fixed1/1.001, \
40 .vdisplay = 480, \
41 .vtotal = 525, \
42 .hvirtual = 660
43
44#define PAL_LIKE_TIMINGS .vrefresh = 50 * fixed1, \
45 .vdisplay = 576, \
46 .vtotal = 625, \
47 .hvirtual = 810
48
49struct ch7006_tv_norm_info ch7006_tv_norms[] = {
50 [TV_NORM_NTSC_M] = {
51 NTSC_LIKE_TIMINGS,
52 .black_level = 0.339 * fixed1,
53 .subc_freq = 3579545 * fixed1,
54 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC),
55 .voffset = 0,
56 },
57 [TV_NORM_NTSC_J] = {
58 NTSC_LIKE_TIMINGS,
59 .black_level = 0.286 * fixed1,
60 .subc_freq = 3579545 * fixed1,
61 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, NTSC_J),
62 .voffset = 0,
63 },
64 [TV_NORM_PAL] = {
65 PAL_LIKE_TIMINGS,
66 .black_level = 0.3 * fixed1,
67 .subc_freq = 4433618.75 * fixed1,
68 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
69 .voffset = 0,
70 },
71 [TV_NORM_PAL_M] = {
72 NTSC_LIKE_TIMINGS,
73 .black_level = 0.339 * fixed1,
74 .subc_freq = 3575611.433 * fixed1,
75 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
76 .voffset = 16,
77 },
78
79 /* The following modes seem to work right but they're
80 * undocumented */
81
82 [TV_NORM_PAL_N] = {
83 PAL_LIKE_TIMINGS,
84 .black_level = 0.339 * fixed1,
85 .subc_freq = 4433618.75 * fixed1,
86 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
87 .voffset = 0,
88 },
89 [TV_NORM_PAL_NC] = {
90 PAL_LIKE_TIMINGS,
91 .black_level = 0.3 * fixed1,
92 .subc_freq = 3582056.25 * fixed1,
93 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL),
94 .voffset = 0,
95 },
96 [TV_NORM_PAL_60] = {
97 NTSC_LIKE_TIMINGS,
98 .black_level = 0.3 * fixed1,
99 .subc_freq = 4433618.75 * fixed1,
100 .dispmode = bitfs(CH7006_DISPMODE_OUTPUT_STD, PAL_M),
101 .voffset = 16,
102 },
103};
104
105#define __MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
106 subc, scale, scale_mask, norm_mask, e_hd, e_vd) { \
107 .mode = { \
108 .name = #hd "x" #vd, \
109 .status = 0, \
110 .type = DRM_MODE_TYPE_DRIVER, \
111 .clock = f, \
112 .hdisplay = hd, \
113 .hsync_start = e_hd + 16, \
114 .hsync_end = e_hd + 80, \
115 .htotal = ht, \
116 .hskew = 0, \
117 .vdisplay = vd, \
118 .vsync_start = vd + 10, \
119 .vsync_end = vd + 26, \
120 .vtotal = vt, \
121 .vscan = 0, \
122 .flags = DRM_MODE_FLAG_##hsynp##HSYNC | \
123 DRM_MODE_FLAG_##vsynp##VSYNC, \
124 .vrefresh = 0, \
125 }, \
126 .enc_hdisp = e_hd, \
127 .enc_vdisp = e_vd, \
128 .subc_coeff = subc * fixed1, \
129 .dispmode = bitfs(CH7006_DISPMODE_SCALING_RATIO, scale) | \
130 bitfs(CH7006_DISPMODE_INPUT_RES, e_hd##x##e_vd), \
131 .valid_scales = scale_mask, \
132 .valid_norms = norm_mask \
133 }
134
135#define MODE(f, hd, vd, ht, vt, hsynp, vsynp, \
136 subc, scale, scale_mask, norm_mask) \
137 __MODE(f, hd, vd, ht, vt, hsynp, vsynp, subc, scale, \
138 scale_mask, norm_mask, hd, vd)
139
140#define NTSC_LIKE (1 << TV_NORM_NTSC_M | 1 << TV_NORM_NTSC_J | \
141 1 << TV_NORM_PAL_M | 1 << TV_NORM_PAL_60)
142
143#define PAL_LIKE (1 << TV_NORM_PAL | 1 << TV_NORM_PAL_N | 1 << TV_NORM_PAL_NC)
144
145struct ch7006_mode ch7006_modes[] = {
146 MODE(21000, 512, 384, 840, 500, N, N, 181.797557582, 5_4, 0x6, PAL_LIKE),
147 MODE(26250, 512, 384, 840, 625, N, N, 145.438046066, 1_1, 0x1, PAL_LIKE),
148 MODE(20140, 512, 384, 800, 420, N, N, 213.257083791, 5_4, 0x4, NTSC_LIKE),
149 MODE(24671, 512, 384, 784, 525, N, N, 174.0874153, 1_1, 0x3, NTSC_LIKE),
150 MODE(28125, 720, 400, 1125, 500, N, N, 135.742176298, 5_4, 0x6, PAL_LIKE),
151 MODE(34875, 720, 400, 1116, 625, N, N, 109.469496898, 1_1, 0x1, PAL_LIKE),
152 MODE(23790, 720, 400, 945, 420, N, N, 160.475642016, 5_4, 0x4, NTSC_LIKE),
153 MODE(29455, 720, 400, 936, 525, N, N, 129.614941843, 1_1, 0x3, NTSC_LIKE),
154 MODE(25000, 640, 400, 1000, 500, N, N, 152.709948279, 5_4, 0x6, PAL_LIKE),
155 MODE(31500, 640, 400, 1008, 625, N, N, 121.198371646, 1_1, 0x1, PAL_LIKE),
156 MODE(21147, 640, 400, 840, 420, N, N, 180.535097338, 5_4, 0x4, NTSC_LIKE),
157 MODE(26434, 640, 400, 840, 525, N, N, 144.42807787, 1_1, 0x2, NTSC_LIKE),
158 MODE(30210, 640, 400, 840, 600, N, N, 126.374568276, 7_8, 0x1, NTSC_LIKE),
159 MODE(21000, 640, 480, 840, 500, N, N, 181.797557582, 5_4, 0x4, PAL_LIKE),
160 MODE(26250, 640, 480, 840, 625, N, N, 145.438046066, 1_1, 0x2, PAL_LIKE),
161 MODE(31500, 640, 480, 840, 750, N, N, 121.198371646, 5_6, 0x1, PAL_LIKE),
162 MODE(24671, 640, 480, 784, 525, N, N, 174.0874153, 1_1, 0x4, NTSC_LIKE),
163 MODE(28196, 640, 480, 784, 600, N, N, 152.326488422, 7_8, 0x2, NTSC_LIKE),
164 MODE(30210, 640, 480, 800, 630, N, N, 142.171389101, 5_6, 0x1, NTSC_LIKE),
165 __MODE(29500, 720, 576, 944, 625, P, P, 145.592111636, 1_1, 0x7, PAL_LIKE, 800, 600),
166 MODE(36000, 800, 600, 960, 750, P, P, 119.304647022, 5_6, 0x6, PAL_LIKE),
167 MODE(39000, 800, 600, 936, 836, P, P, 110.127366499, 3_4, 0x1, PAL_LIKE),
168 MODE(39273, 800, 600, 1040, 630, P, P, 145.816809399, 5_6, 0x4, NTSC_LIKE),
169 MODE(43636, 800, 600, 1040, 700, P, P, 131.235128487, 3_4, 0x2, NTSC_LIKE),
170 MODE(47832, 800, 600, 1064, 750, P, P, 119.723275165, 7_10, 0x1, NTSC_LIKE),
171 {}
172};
173
174struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
175 struct drm_display_mode *drm_mode)
176{
177 struct ch7006_priv *priv = to_ch7006_priv(encoder);
178 struct ch7006_mode *mode;
179
180 for (mode = ch7006_modes; mode->mode.clock; mode++) {
181
182 if (~mode->valid_norms & 1<<priv->norm)
183 continue;
184
185 if (mode->mode.hdisplay != drm_mode->hdisplay ||
186 mode->mode.vdisplay != drm_mode->vdisplay ||
187 mode->mode.vtotal != drm_mode->vtotal ||
188 mode->mode.htotal != drm_mode->htotal ||
189 mode->mode.clock != drm_mode->clock)
190 continue;
191
192 return mode;
193 }
194
195 return NULL;
196}
197
198/* Some common HW state calculation code */
199
200void ch7006_setup_levels(struct drm_encoder *encoder)
201{
202 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
203 struct ch7006_priv *priv = to_ch7006_priv(encoder);
204 uint8_t *regs = priv->state.regs;
205 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
206 int gain;
207 int black_level;
208
209 /* Set DAC_GAIN if the voltage drop between white and black is
210 * high enough. */
211 if (norm->black_level < 339*fixed1/1000) {
212 gain = 76;
213
214 regs[CH7006_INPUT_FORMAT] |= CH7006_INPUT_FORMAT_DAC_GAIN;
215 } else {
216 gain = 71;
217
218 regs[CH7006_INPUT_FORMAT] &= ~CH7006_INPUT_FORMAT_DAC_GAIN;
219 }
220
221 black_level = round_fixed(norm->black_level*26625)/gain;
222
223 /* Correct it with the specified brightness. */
224 black_level = interpolate(90, black_level, 208, priv->brightness);
225
226 regs[CH7006_BLACK_LEVEL] = bitf(CH7006_BLACK_LEVEL_0, black_level);
227
228 ch7006_dbg(client, "black level: %d\n", black_level);
229}
230
231void ch7006_setup_subcarrier(struct drm_encoder *encoder)
232{
233 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
234 struct ch7006_priv *priv = to_ch7006_priv(encoder);
235 struct ch7006_state *state = &priv->state;
236 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
237 struct ch7006_mode *mode = priv->mode;
238 uint32_t subc_inc;
239
240 subc_inc = round_fixed((mode->subc_coeff >> 8)
241 * (norm->subc_freq >> 24));
242
243 setbitf(state, CH7006_SUBC_INC0, 28, subc_inc);
244 setbitf(state, CH7006_SUBC_INC1, 24, subc_inc);
245 setbitf(state, CH7006_SUBC_INC2, 20, subc_inc);
246 setbitf(state, CH7006_SUBC_INC3, 16, subc_inc);
247 setbitf(state, CH7006_SUBC_INC4, 12, subc_inc);
248 setbitf(state, CH7006_SUBC_INC5, 8, subc_inc);
249 setbitf(state, CH7006_SUBC_INC6, 4, subc_inc);
250 setbitf(state, CH7006_SUBC_INC7, 0, subc_inc);
251
252 ch7006_dbg(client, "subcarrier inc: %u\n", subc_inc);
253}
254
255void ch7006_setup_pll(struct drm_encoder *encoder)
256{
257 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
258 struct ch7006_priv *priv = to_ch7006_priv(encoder);
259 uint8_t *regs = priv->state.regs;
260 struct ch7006_mode *mode = priv->mode;
261 int n, best_n = 0;
262 int m, best_m = 0;
263 int freq, best_freq = 0;
264
265 for (n = 0; n < CH7006_MAXN; n++) {
266 for (m = 0; m < CH7006_MAXM; m++) {
267 freq = CH7006_FREQ0*(n+2)/(m+2);
268
269 if (abs(freq - mode->mode.clock) <
270 abs(best_freq - mode->mode.clock)) {
271 best_freq = freq;
272 best_n = n;
273 best_m = m;
274 }
275 }
276 }
277
278 regs[CH7006_PLLOV] = bitf(CH7006_PLLOV_N_8, best_n) |
279 bitf(CH7006_PLLOV_M_8, best_m);
280
281 regs[CH7006_PLLM] = bitf(CH7006_PLLM_0, best_m);
282 regs[CH7006_PLLN] = bitf(CH7006_PLLN_0, best_n);
283
284 if (best_n < 108)
285 regs[CH7006_PLL_CONTROL] |= CH7006_PLL_CONTROL_CAPACITOR;
286 else
287 regs[CH7006_PLL_CONTROL] &= ~CH7006_PLL_CONTROL_CAPACITOR;
288
289 ch7006_dbg(client, "n=%d m=%d f=%d c=%d\n",
290 best_n, best_m, best_freq, best_n < 108);
291}
292
293void ch7006_setup_power_state(struct drm_encoder *encoder)
294{
295 struct ch7006_priv *priv = to_ch7006_priv(encoder);
296 uint8_t *power = &priv->state.regs[CH7006_POWER];
297 int subconnector;
298
299 subconnector = priv->select_subconnector ? priv->select_subconnector :
300 priv->subconnector;
301
302 *power = CH7006_POWER_RESET;
303
304 if (priv->last_dpms == DRM_MODE_DPMS_ON) {
305 switch (subconnector) {
306 case DRM_MODE_SUBCONNECTOR_SVIDEO:
307 *power |= bitfs(CH7006_POWER_LEVEL, CVBS_OFF);
308 break;
309 case DRM_MODE_SUBCONNECTOR_Composite:
310 *power |= bitfs(CH7006_POWER_LEVEL, SVIDEO_OFF);
311 break;
312 case DRM_MODE_SUBCONNECTOR_SCART:
313 *power |= bitfs(CH7006_POWER_LEVEL, NORMAL) |
314 CH7006_POWER_SCART;
315 break;
316 }
317
318 } else {
319 *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
320 }
321}
322
323void ch7006_setup_properties(struct drm_encoder *encoder)
324{
325 struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
326 struct ch7006_priv *priv = to_ch7006_priv(encoder);
327 struct ch7006_state *state = &priv->state;
328 struct ch7006_tv_norm_info *norm = &ch7006_tv_norms[priv->norm];
329 struct ch7006_mode *ch_mode = priv->mode;
330 struct drm_display_mode *mode = &ch_mode->mode;
331 uint8_t *regs = state->regs;
332 int flicker, contrast, hpos, vpos;
333 uint64_t scale, aspect;
334
335 flicker = interpolate(0, 2, 3, priv->flicker);
336 regs[CH7006_FFILTER] = bitf(CH7006_FFILTER_TEXT, flicker) |
337 bitf(CH7006_FFILTER_LUMA, flicker) |
338 bitf(CH7006_FFILTER_CHROMA, 1);
339
340 contrast = interpolate(0, 5, 7, priv->contrast);
341 regs[CH7006_CONTRAST] = bitf(CH7006_CONTRAST_0, contrast);
342
343 scale = norm->vtotal*fixed1;
344 do_div(scale, mode->vtotal);
345
346 aspect = ch_mode->enc_hdisp*fixed1;
347 do_div(aspect, ch_mode->enc_vdisp);
348
349 hpos = round_fixed((norm->hvirtual * aspect - mode->hdisplay * scale)
350 * priv->hmargin * mode->vtotal) / norm->vtotal / 100 / 4;
351
352 setbitf(state, CH7006_POV, HPOS_8, hpos);
353 setbitf(state, CH7006_HPOS, 0, hpos);
354
355 vpos = max(0, norm->vdisplay - round_fixed(mode->vdisplay*scale)
356 + norm->voffset) * priv->vmargin / 100 / 2;
357
358 setbitf(state, CH7006_POV, VPOS_8, vpos);
359 setbitf(state, CH7006_VPOS, 0, vpos);
360
361 ch7006_dbg(client, "hpos: %d, vpos: %d\n", hpos, vpos);
362}
363
364/* HW access functions */
365
366void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val)
367{
368 uint8_t buf[] = {addr, val};
369 int ret;
370
371 ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
372 if (ret < 0)
373 ch7006_err(client, "Error %d writing to subaddress 0x%x\n",
374 ret, addr);
375}
376
377uint8_t ch7006_read(struct i2c_client *client, uint8_t addr)
378{
379 uint8_t val;
380 int ret;
381
382 ret = i2c_master_send(client, &addr, sizeof(addr));
383 if (ret < 0)
384 goto fail;
385
386 ret = i2c_master_recv(client, &val, sizeof(val));
387 if (ret < 0)
388 goto fail;
389
390 return val;
391
392fail:
393 ch7006_err(client, "Error %d reading from subaddress 0x%x\n",
394 ret, addr);
395 return 0;
396}
397
398void ch7006_state_load(struct i2c_client *client,
399 struct ch7006_state *state)
400{
401 ch7006_load_reg(client, state, CH7006_POWER);
402
403 ch7006_load_reg(client, state, CH7006_DISPMODE);
404 ch7006_load_reg(client, state, CH7006_FFILTER);
405 ch7006_load_reg(client, state, CH7006_BWIDTH);
406 ch7006_load_reg(client, state, CH7006_INPUT_FORMAT);
407 ch7006_load_reg(client, state, CH7006_CLKMODE);
408 ch7006_load_reg(client, state, CH7006_START_ACTIVE);
409 ch7006_load_reg(client, state, CH7006_POV);
410 ch7006_load_reg(client, state, CH7006_BLACK_LEVEL);
411 ch7006_load_reg(client, state, CH7006_HPOS);
412 ch7006_load_reg(client, state, CH7006_VPOS);
413 ch7006_load_reg(client, state, CH7006_INPUT_SYNC);
414 ch7006_load_reg(client, state, CH7006_DETECT);
415 ch7006_load_reg(client, state, CH7006_CONTRAST);
416 ch7006_load_reg(client, state, CH7006_PLLOV);
417 ch7006_load_reg(client, state, CH7006_PLLM);
418 ch7006_load_reg(client, state, CH7006_PLLN);
419 ch7006_load_reg(client, state, CH7006_BCLKOUT);
420 ch7006_load_reg(client, state, CH7006_SUBC_INC0);
421 ch7006_load_reg(client, state, CH7006_SUBC_INC1);
422 ch7006_load_reg(client, state, CH7006_SUBC_INC2);
423 ch7006_load_reg(client, state, CH7006_SUBC_INC3);
424 ch7006_load_reg(client, state, CH7006_SUBC_INC4);
425 ch7006_load_reg(client, state, CH7006_SUBC_INC5);
426 ch7006_load_reg(client, state, CH7006_SUBC_INC6);
427 ch7006_load_reg(client, state, CH7006_SUBC_INC7);
428 ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
429 ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
430
431 /* I don't know what this is for, but otherwise I get no
432 * signal.
433 */
434 ch7006_write(client, 0x3d, 0x0);
435}
436
437void ch7006_state_save(struct i2c_client *client,
438 struct ch7006_state *state)
439{
440 ch7006_save_reg(client, state, CH7006_POWER);
441
442 ch7006_save_reg(client, state, CH7006_DISPMODE);
443 ch7006_save_reg(client, state, CH7006_FFILTER);
444 ch7006_save_reg(client, state, CH7006_BWIDTH);
445 ch7006_save_reg(client, state, CH7006_INPUT_FORMAT);
446 ch7006_save_reg(client, state, CH7006_CLKMODE);
447 ch7006_save_reg(client, state, CH7006_START_ACTIVE);
448 ch7006_save_reg(client, state, CH7006_POV);
449 ch7006_save_reg(client, state, CH7006_BLACK_LEVEL);
450 ch7006_save_reg(client, state, CH7006_HPOS);
451 ch7006_save_reg(client, state, CH7006_VPOS);
452 ch7006_save_reg(client, state, CH7006_INPUT_SYNC);
453 ch7006_save_reg(client, state, CH7006_DETECT);
454 ch7006_save_reg(client, state, CH7006_CONTRAST);
455 ch7006_save_reg(client, state, CH7006_PLLOV);
456 ch7006_save_reg(client, state, CH7006_PLLM);
457 ch7006_save_reg(client, state, CH7006_PLLN);
458 ch7006_save_reg(client, state, CH7006_BCLKOUT);
459 ch7006_save_reg(client, state, CH7006_SUBC_INC0);
460 ch7006_save_reg(client, state, CH7006_SUBC_INC1);
461 ch7006_save_reg(client, state, CH7006_SUBC_INC2);
462 ch7006_save_reg(client, state, CH7006_SUBC_INC3);
463 ch7006_save_reg(client, state, CH7006_SUBC_INC4);
464 ch7006_save_reg(client, state, CH7006_SUBC_INC5);
465 ch7006_save_reg(client, state, CH7006_SUBC_INC6);
466 ch7006_save_reg(client, state, CH7006_SUBC_INC7);
467 ch7006_save_reg(client, state, CH7006_PLL_CONTROL);
468 ch7006_save_reg(client, state, CH7006_CALC_SUBC_INC0);
469
470 state->regs[CH7006_FFILTER] = (state->regs[CH7006_FFILTER] & 0xf0) |
471 (state->regs[CH7006_FFILTER] & 0x0c) >> 2 |
472 (state->regs[CH7006_FFILTER] & 0x03) << 2;
473}
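
The mode and norm tables above store frequencies and levels in 32.32 fixed point (fixed1 = 1LL << 32 in ch7006_priv.h). A standalone illustration of round_fixed() and of the 8- and 24-bit pre-shifts in ch7006_setup_subcarrier(), which split the combined 32-bit shift so the 64-bit product cannot overflow; the coefficient is the subc_coeff of the 24671 kHz mode from the table:

#include <stdint.h>
#include <stdio.h>

typedef int64_t fixed;
#define fixed1 (1LL << 32)

static int32_t round_fixed(fixed x)
{
	return (x + fixed1 / 2) >> 32;
}

int main(void)
{
	fixed subc_freq = 3579545LL * fixed1;		/* NTSC-M carrier, Hz */
	fixed coeff = (fixed)(174.0874153 * fixed1);	/* subc_coeff */
	uint32_t subc_inc = round_fixed((coeff >> 8) * (subc_freq >> 24));

	printf("round_fixed(2.5) = %d\n", round_fixed(5 * fixed1 / 2));
	printf("subc_inc = %u\n", subc_inc);
	return 0;
}
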
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
new file mode 100644
index 000000000000..b06d3d93d8ac
--- /dev/null
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -0,0 +1,344 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __DRM_I2C_CH7006_PRIV_H__
28#define __DRM_I2C_CH7006_PRIV_H__
29
30#include "drmP.h"
31#include "drm_crtc_helper.h"
32#include "drm_encoder_slave.h"
33#include "i2c/ch7006.h"
34
35typedef int64_t fixed;
36#define fixed1 (1LL << 32)
37
38enum ch7006_tv_norm {
39 TV_NORM_PAL,
40 TV_NORM_PAL_M,
41 TV_NORM_PAL_N,
42 TV_NORM_PAL_NC,
43 TV_NORM_PAL_60,
44 TV_NORM_NTSC_M,
45 TV_NORM_NTSC_J,
46 NUM_TV_NORMS
47};
48
49struct ch7006_tv_norm_info {
50 fixed vrefresh;
51 int vdisplay;
52 int vtotal;
53 int hvirtual;
54
55 fixed subc_freq;
56 fixed black_level;
57
58 uint32_t dispmode;
59 int voffset;
60};
61
62struct ch7006_mode {
63 struct drm_display_mode mode;
64
65 int enc_hdisp;
66 int enc_vdisp;
67
68 fixed subc_coeff;
69 uint32_t dispmode;
70
71 uint32_t valid_scales;
72 uint32_t valid_norms;
73};
74
75struct ch7006_state {
76 uint8_t regs[0x26];
77};
78
79struct ch7006_priv {
80 struct ch7006_encoder_params *params;
81 struct ch7006_mode *mode;
82
83 struct ch7006_state state;
84 struct ch7006_state saved_state;
85
86 struct drm_property *scale_property;
87
88 int select_subconnector;
89 int subconnector;
90 int hmargin;
91 int vmargin;
92 enum ch7006_tv_norm norm;
93 int brightness;
94 int contrast;
95 int flicker;
96 int scale;
97
98 int last_dpms;
99};
100
101#define to_ch7006_priv(x) \
102 ((struct ch7006_priv *)to_encoder_slave(x)->slave_priv)
103
104extern int ch7006_debug;
105extern char *ch7006_tv_norm;
106extern int ch7006_scale;
107
108extern char *ch7006_tv_norm_names[];
109extern struct ch7006_tv_norm_info ch7006_tv_norms[];
110extern struct ch7006_mode ch7006_modes[];
111
112struct ch7006_mode *ch7006_lookup_mode(struct drm_encoder *encoder,
113 struct drm_display_mode *drm_mode);
114
115void ch7006_setup_levels(struct drm_encoder *encoder);
116void ch7006_setup_subcarrier(struct drm_encoder *encoder);
117void ch7006_setup_pll(struct drm_encoder *encoder);
118void ch7006_setup_power_state(struct drm_encoder *encoder);
119void ch7006_setup_properties(struct drm_encoder *encoder);
120
121void ch7006_write(struct i2c_client *client, uint8_t addr, uint8_t val);
122uint8_t ch7006_read(struct i2c_client *client, uint8_t addr);
123
124void ch7006_state_load(struct i2c_client *client,
125 struct ch7006_state *state);
126void ch7006_state_save(struct i2c_client *client,
127 struct ch7006_state *state);
128
129/* Some helper macros */
130
131#define ch7006_dbg(client, format, ...) do { \
132 if (ch7006_debug) \
133 dev_printk(KERN_DEBUG, &client->dev, \
134 "%s: " format, __func__, ## __VA_ARGS__); \
135 } while (0)
136#define ch7006_info(client, format, ...) \
137 dev_info(&client->dev, format, __VA_ARGS__)
138#define ch7006_err(client, format, ...) \
139 dev_err(&client->dev, format, __VA_ARGS__)
140
141#define __mask(src, bitfield) \
142 (((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
143#define mask(bitfield) __mask(bitfield)
144
145#define __bitf(src, bitfield, x) \
146 (((x) >> (src) << (0 ? bitfield)) & __mask(src, bitfield))
147#define bitf(bitfield, x) __bitf(bitfield, x)
148#define bitfs(bitfield, s) __bitf(bitfield, bitfield##_##s)
149#define setbitf(state, reg, bitfield, x) \
150 state->regs[reg] = (state->regs[reg] & ~mask(reg##_##bitfield)) \
151 | bitf(reg##_##bitfield, x)
152
153#define __unbitf(src, bitfield, x) \
154 ((x & __mask(src, bitfield)) >> (0 ? bitfield) << (src))
155#define unbitf(bitfield, x) __unbitf(bitfield, x)
156
157static inline int interpolate(int y0, int y1, int y2, int x)
158{
159 return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
160}
161
162static inline int32_t round_fixed(fixed x)
163{
164 return (x + fixed1/2) >> 32;
165}
166
167#define ch7006_load_reg(client, state, reg) ch7006_write(client, reg, state->regs[reg])
168#define ch7006_save_reg(client, state, reg) state->regs[reg] = ch7006_read(client, reg)
169
170/* Fixed hardware specs */
171
172#define CH7006_FREQ0 14318
173#define CH7006_MAXN 650
174#define CH7006_MAXM 315
175
176/* Register definitions */
177
178#define CH7006_DISPMODE 0x00
179#define CH7006_DISPMODE_INPUT_RES 0, 7:5
180#define CH7006_DISPMODE_INPUT_RES_512x384 0x0
181#define CH7006_DISPMODE_INPUT_RES_720x400 0x1
182#define CH7006_DISPMODE_INPUT_RES_640x400 0x2
183#define CH7006_DISPMODE_INPUT_RES_640x480 0x3
184#define CH7006_DISPMODE_INPUT_RES_800x600 0x4
185#define CH7006_DISPMODE_INPUT_RES_NATIVE 0x5
186#define CH7006_DISPMODE_OUTPUT_STD 0, 4:3
187#define CH7006_DISPMODE_OUTPUT_STD_PAL 0x0
188#define CH7006_DISPMODE_OUTPUT_STD_NTSC 0x1
189#define CH7006_DISPMODE_OUTPUT_STD_PAL_M 0x2
190#define CH7006_DISPMODE_OUTPUT_STD_NTSC_J 0x3
191#define CH7006_DISPMODE_SCALING_RATIO 0, 2:0
192#define CH7006_DISPMODE_SCALING_RATIO_5_4 0x0
193#define CH7006_DISPMODE_SCALING_RATIO_1_1 0x1
194#define CH7006_DISPMODE_SCALING_RATIO_7_8 0x2
195#define CH7006_DISPMODE_SCALING_RATIO_5_6 0x3
196#define CH7006_DISPMODE_SCALING_RATIO_3_4 0x4
197#define CH7006_DISPMODE_SCALING_RATIO_7_10 0x5
198
199#define CH7006_FFILTER 0x01
200#define CH7006_FFILTER_TEXT 0, 5:4
201#define CH7006_FFILTER_LUMA 0, 3:2
202#define CH7006_FFILTER_CHROMA 0, 1:0
203#define CH7006_FFILTER_CHROMA_NO_DCRAWL 0x3
204
205#define CH7006_BWIDTH 0x03
206#define CH7006_BWIDTH_5L_FFILER (1 << 7)
207#define CH7006_BWIDTH_CVBS_NO_CHROMA (1 << 6)
208#define CH7006_BWIDTH_CHROMA 0, 5:4
209#define CH7006_BWIDTH_SVIDEO_YPEAK (1 << 3)
210#define CH7006_BWIDTH_SVIDEO_LUMA 0, 2:1
211#define CH7006_BWIDTH_CVBS_LUMA 0, 0:0
212
213#define CH7006_INPUT_FORMAT 0x04
214#define CH7006_INPUT_FORMAT_DAC_GAIN (1 << 6)
215#define CH7006_INPUT_FORMAT_RGB_PASS_THROUGH (1 << 5)
216#define CH7006_INPUT_FORMAT_FORMAT 0, 3:0
217#define CH7006_INPUT_FORMAT_FORMAT_RGB16 0x0
218#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m16 0x1
219#define CH7006_INPUT_FORMAT_FORMAT_RGB24m16 0x2
220#define CH7006_INPUT_FORMAT_FORMAT_RGB15 0x3
221#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12C 0x4
222#define CH7006_INPUT_FORMAT_FORMAT_RGB24m12I 0x5
223#define CH7006_INPUT_FORMAT_FORMAT_RGB24m8 0x6
224#define CH7006_INPUT_FORMAT_FORMAT_RGB16m8 0x7
225#define CH7006_INPUT_FORMAT_FORMAT_RGB15m8 0x8
226#define CH7006_INPUT_FORMAT_FORMAT_YCrCb24m8 0x9
227
228#define CH7006_CLKMODE 0x06
229#define CH7006_CLKMODE_SUBC_LOCK (1 << 7)
230#define CH7006_CLKMODE_MASTER (1 << 6)
231#define CH7006_CLKMODE_POS_EDGE (1 << 4)
232#define CH7006_CLKMODE_XCM 0, 3:2
233#define CH7006_CLKMODE_PCM 0, 1:0
234
235#define CH7006_START_ACTIVE 0x07
236#define CH7006_START_ACTIVE_0 0, 7:0
237
238#define CH7006_POV 0x08
239#define CH7006_POV_START_ACTIVE_8 8, 2:2
240#define CH7006_POV_HPOS_8 8, 1:1
241#define CH7006_POV_VPOS_8 8, 0:0
242
243#define CH7006_BLACK_LEVEL 0x09
244#define CH7006_BLACK_LEVEL_0 0, 7:0
245
246#define CH7006_HPOS 0x0a
247#define CH7006_HPOS_0 0, 7:0
248
249#define CH7006_VPOS 0x0b
250#define CH7006_VPOS_0 0, 7:0
251
252#define CH7006_INPUT_SYNC 0x0d
253#define CH7006_INPUT_SYNC_EMBEDDED (1 << 3)
254#define CH7006_INPUT_SYNC_OUTPUT (1 << 2)
255#define CH7006_INPUT_SYNC_PVSYNC (1 << 1)
256#define CH7006_INPUT_SYNC_PHSYNC (1 << 0)
257
258#define CH7006_POWER 0x0e
259#define CH7006_POWER_SCART (1 << 4)
260#define CH7006_POWER_RESET (1 << 3)
261#define CH7006_POWER_LEVEL 0, 2:0
262#define CH7006_POWER_LEVEL_CVBS_OFF 0x0
263#define CH7006_POWER_LEVEL_POWER_OFF 0x1
264#define CH7006_POWER_LEVEL_SVIDEO_OFF 0x2
265#define CH7006_POWER_LEVEL_NORMAL 0x3
266#define CH7006_POWER_LEVEL_FULL_POWER_OFF 0x4
267
268#define CH7006_DETECT 0x10
269#define CH7006_DETECT_SVIDEO_Y_TEST (1 << 3)
270#define CH7006_DETECT_SVIDEO_C_TEST (1 << 2)
271#define CH7006_DETECT_CVBS_TEST (1 << 1)
272#define CH7006_DETECT_SENSE (1 << 0)
273
274#define CH7006_CONTRAST 0x11
275#define CH7006_CONTRAST_0 0, 2:0
276
277#define CH7006_PLLOV 0x13
278#define CH7006_PLLOV_N_8 8, 2:1
279#define CH7006_PLLOV_M_8 8, 0:0
280
281#define CH7006_PLLM 0x14
282#define CH7006_PLLM_0 0, 7:0
283
284#define CH7006_PLLN 0x15
285#define CH7006_PLLN_0 0, 7:0
286
287#define CH7006_BCLKOUT 0x17
288
289#define CH7006_SUBC_INC0 0x18
290#define CH7006_SUBC_INC0_28 28, 3:0
291
292#define CH7006_SUBC_INC1 0x19
293#define CH7006_SUBC_INC1_24 24, 3:0
294
295#define CH7006_SUBC_INC2 0x1a
296#define CH7006_SUBC_INC2_20 20, 3:0
297
298#define CH7006_SUBC_INC3 0x1b
299#define CH7006_SUBC_INC3_GPIO1_VAL (1 << 7)
300#define CH7006_SUBC_INC3_GPIO0_VAL (1 << 6)
301#define CH7006_SUBC_INC3_POUT_3_3V (1 << 5)
302#define CH7006_SUBC_INC3_POUT_INV (1 << 4)
303#define CH7006_SUBC_INC3_16 16, 3:0
304
305#define CH7006_SUBC_INC4 0x1c
306#define CH7006_SUBC_INC4_GPIO1_IN (1 << 7)
307#define CH7006_SUBC_INC4_GPIO0_IN (1 << 6)
308#define CH7006_SUBC_INC4_DS_INPUT (1 << 4)
309#define CH7006_SUBC_INC4_12 12, 3:0
310
311#define CH7006_SUBC_INC5 0x1d
312#define CH7006_SUBC_INC5_8 8, 3:0
313
314#define CH7006_SUBC_INC6 0x1e
315#define CH7006_SUBC_INC6_4 4, 3:0
316
317#define CH7006_SUBC_INC7 0x1f
318#define CH7006_SUBC_INC7_0 0, 3:0
319
320#define CH7006_PLL_CONTROL 0x20
321#define CH7006_PLL_CONTROL_CPI (1 << 5)
322#define CH7006_PLL_CONTROL_CAPACITOR (1 << 4)
323#define CH7006_PLL_CONTROL_7STAGES (1 << 3)
324#define CH7006_PLL_CONTROL_DIGITAL_5V (1 << 2)
325#define CH7006_PLL_CONTROL_ANALOG_5V (1 << 1)
326#define CH7006_PLL_CONTROL_MEMORY_5V (1 << 0)
327
328#define CH7006_CALC_SUBC_INC0 0x21
329#define CH7006_CALC_SUBC_INC0_24 24, 4:3
330#define CH7006_CALC_SUBC_INC0_HYST 0, 2:1
331#define CH7006_CALC_SUBC_INC0_AUTO (1 << 0)
332
333#define CH7006_CALC_SUBC_INC1 0x22
334#define CH7006_CALC_SUBC_INC1_16 16, 7:0
335
336#define CH7006_CALC_SUBC_INC2 0x23
337#define CH7006_CALC_SUBC_INC2_8 8, 7:0
338
339#define CH7006_CALC_SUBC_INC3 0x24
340#define CH7006_CALC_SUBC_INC3_0 0, 7:0
341
342#define CH7006_VERSION_ID 0x25
343
344#endif
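
ch7006_priv.h describes every register field as "srcshift, high:low" (e.g. CH7006_DISPMODE_INPUT_RES is 0, 7:5), and the __mask/__bitf macros recover the bit bounds by abusing the conditional operator: (1 ? 7:5) evaluates to 7 and (0 ? 7:5) to 5. A standalone demo of the trick, compilable on its own with the macros copied from the header:

#include <stdio.h>

#define __mask(src, bitfield) \
	(((2 << (1 ? bitfield)) - 1) & ~((1 << (0 ? bitfield)) - 1))
#define mask(bitfield) __mask(bitfield)

#define __bitf(src, bitfield, x) \
	(((x) >> (src) << (0 ? bitfield)) & __mask(src, bitfield))
#define bitf(bitfield, x) __bitf(bitfield, x)

#define CH7006_DISPMODE_INPUT_RES 0, 7:5	/* bits 7..5, no pre-shift */

int main(void)
{
	/* bits 7..5 -> mask 0xe0; packing INPUT_RES_NATIVE (0x5) -> 0xa0 */
	printf("mask = 0x%02x\n", mask(CH7006_DISPMODE_INPUT_RES));
	printf("bitf(0x5) = 0x%02x\n", bitf(CH7006_DISPMODE_INPUT_RES, 0x5));
	return 0;
}
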
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fa7b9be096bc..9929f84ec3e1 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
15 intel_lvds.o \ 15 intel_lvds.o \
16 intel_bios.o \ 16 intel_bios.o \
17 intel_dp.o \ 17 intel_dp.o \
18 intel_dp_i2c.o \
19 intel_hdmi.o \ 18 intel_hdmi.o \
20 intel_sdvo.o \ 19 intel_sdvo.o \
21 intel_modes.o \ 20 intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
23 intel_fb.o \ 22 intel_fb.o \
24 intel_tv.o \ 23 intel_tv.o \
25 intel_dvo.o \ 24 intel_dvo.o \
25 intel_overlay.o \
26 dvo_ch7xxx.o \ 26 dvo_ch7xxx.o \
27 dvo_ch7017.o \ 27 dvo_ch7017.o \
28 dvo_ivch.o \ 28 dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
index 621815b531db..1184c14ba87d 100644
--- a/drivers/gpu/drm/i915/dvo_ch7017.c
+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	if (val != CH7017_DEVICE_ID_VALUE &&
 	    val != CH7018_DEVICE_ID_VALUE &&
 	    val != CH7019_DEVICE_ID_VALUE) {
-		DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+			      "Slave %d.\n",
 			  val, i2cbus->adapter.name,dvo->slave_addr);
 		goto fail;
 	}
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
 	uint8_t active_input_line_output;
 
-	DRM_DEBUG("Registers before mode setting\n");
+	DRM_DEBUG_KMS("Registers before mode setting\n");
 	ch7017_dump_regs(dvo);
 
 	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	/* Turn the LVDS back on with new settings. */
 	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
 
-	DRM_DEBUG("Registers after mode setting\n");
+	DRM_DEBUG_KMS("Registers after mode setting\n");
 	ch7017_dump_regs(dvo);
 }
 
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 #define DUMP(reg) \
 do { \
 	ch7017_read(dvo, reg, &val); \
-	DRM_DEBUG(#reg ": %02x\n", val); \
+	DRM_DEBUG_KMS(#reg ": %02x\n", val); \
 } while (0)
 
 	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);
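The recurring change in these dvo_*.c hunks is a switch from the generic DRM_DEBUG() to the categorized DRM_DEBUG_KMS()/DRM_LOG_KMS() macros, which are gated on the KMS bit of the drm.debug module parameter so modesetting chatter can be enabled independently of core and driver debug output. A rough sketch of the gating idea (the real definitions live in drmP.h; the macro name here is illustrative):

#include <linux/kernel.h>

#define DRM_UT_CORE	0x01	/* generic drm core messages */
#define DRM_UT_DRIVER	0x02	/* driver-specific messages */
#define DRM_UT_KMS	0x04	/* modesetting messages */

extern unsigned int drm_debug;	/* set via the drm.debug module parameter */

/* Illustrative only; mirrors the shape of the drmP.h macros. */
#define EXAMPLE_DEBUG_KMS(fmt, ...)					\
	do {								\
		if (drm_debug & DRM_UT_KMS)				\
			printk(KERN_DEBUG "[drm:kms] " fmt, ##__VA_ARGS__); \
	} while (0)
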
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index a9b896289680..d56ff5cc22b2 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 
 	name = ch7xxx_get_id(vendor);
 	if (!name) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+			      "slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (device != CH7xxx_DID) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+			      "slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	ch7xxx->quiet = false;
-	DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
 		  name, vendor, device);
 	return true;
 out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		if ((i % 8) == 0 )
-			DRM_DEBUG("\n %02X: ", i);
-		DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+			DRM_LOG_KMS("\n %02X: ", i);
+		DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
 	}
 }
 
322 324
diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
index aa176f9921fe..24169e528f0f 100644
--- a/drivers/gpu/drm/i915/dvo_ivch.c
+++ b/drivers/gpu/drm/i915/dvo_ivch.c
@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	};
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+			      "%s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 		return true;
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 	 * the address it's responding on.
 	 */
 	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
-		DRM_DEBUG("ivch detect failed due to address mismatch "
+		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
 			  "(%d vs %d)\n",
 			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
 		goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
 	uint16_t val;
 
 	ivch_read(dvo, VR00, &val);
-	DRM_DEBUG("VR00: 0x%04x\n", val);
+	DRM_LOG_KMS("VR00: 0x%04x\n", val);
 	ivch_read(dvo, VR01, &val);
-	DRM_DEBUG("VR01: 0x%04x\n", val);
+	DRM_LOG_KMS("VR01: 0x%04x\n", val);
 	ivch_read(dvo, VR30, &val);
-	DRM_DEBUG("VR30: 0x%04x\n", val);
+	DRM_LOG_KMS("VR30: 0x%04x\n", val);
 	ivch_read(dvo, VR40, &val);
-	DRM_DEBUG("VR40: 0x%04x\n", val);
+	DRM_LOG_KMS("VR40: 0x%04x\n", val);
 
 	/* GPIO registers */
 	ivch_read(dvo, VR80, &val);
-	DRM_DEBUG("VR80: 0x%04x\n", val);
+	DRM_LOG_KMS("VR80: 0x%04x\n", val);
 	ivch_read(dvo, VR81, &val);
-	DRM_DEBUG("VR81: 0x%04x\n", val);
+	DRM_LOG_KMS("VR81: 0x%04x\n", val);
 	ivch_read(dvo, VR82, &val);
-	DRM_DEBUG("VR82: 0x%04x\n", val);
+	DRM_LOG_KMS("VR82: 0x%04x\n", val);
 	ivch_read(dvo, VR83, &val);
-	DRM_DEBUG("VR83: 0x%04x\n", val);
+	DRM_LOG_KMS("VR83: 0x%04x\n", val);
 	ivch_read(dvo, VR84, &val);
-	DRM_DEBUG("VR84: 0x%04x\n", val);
+	DRM_LOG_KMS("VR84: 0x%04x\n", val);
 	ivch_read(dvo, VR85, &val);
-	DRM_DEBUG("VR85: 0x%04x\n", val);
+	DRM_LOG_KMS("VR85: 0x%04x\n", val);
 	ivch_read(dvo, VR86, &val);
-	DRM_DEBUG("VR86: 0x%04x\n", val);
+	DRM_LOG_KMS("VR86: 0x%04x\n", val);
 	ivch_read(dvo, VR87, &val);
-	DRM_DEBUG("VR87: 0x%04x\n", val);
+	DRM_LOG_KMS("VR87: 0x%04x\n", val);
 	ivch_read(dvo, VR88, &val);
-	DRM_DEBUG("VR88: 0x%04x\n", val);
+	DRM_LOG_KMS("VR88: 0x%04x\n", val);
 
 	/* Scratch register 0 - AIM Panel type */
 	ivch_read(dvo, VR8E, &val);
-	DRM_DEBUG("VR8E: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
 
 	/* Scratch register 1 - Status register */
 	ivch_read(dvo, VR8F, &val);
-	DRM_DEBUG("VR8F: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
 }
 
 static void ivch_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
index e1c1f7341e5c..0001c13f0a80 100644
--- a/drivers/gpu/drm/i915/dvo_sil164.c
+++ b/drivers/gpu/drm/i915/dvo_sil164.c
@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (ch != (SIL164_VID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (ch != (SIL164_DID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 	sil->quiet = false;
 
-	DRM_DEBUG("init sil164 dvo controller successfully!\n");
+	DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
 	return true;
 
 out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
-	DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
-	DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG8, &val);
-	DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG9, &val);
-	DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REGC, &val);
-	DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
 static void sil164_save(struct intel_dvo_device *dvo)
diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
index 9ecc907384ec..c7c391bc116a 100644
--- a/drivers/gpu/drm/i915/dvo_tfp410.c
+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
 	tfp->quiet = true;
 
 	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
-		DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+			      "Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
-		DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+			      "Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val, val2;
 
 	tfp410_readb(dvo, TFP410_REV, &val);
-	DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_1, &val);
-	DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_2, &val);
-	DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_3, &val);
-	DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_USERCFG, &val);
-	DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
-	DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
-	DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
-	DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
-	DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
-	DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
-	DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
-	DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
 static void tfp410_save(struct intel_dvo_device *dvo)
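
tfp410_dump_regs() above reads each 16-bit quantity as separate LO and HI bytes and prints them high byte first ("%02X%02X", val2, val). A hypothetical helper, not in the patch, that folds the two reads together using the file's tfp410_readb() signature:

static uint16_t tfp410_read16(struct intel_dvo_device *dvo,
			      int lo_reg, int hi_reg)
{
	uint8_t lo = 0, hi = 0;

	tfp410_readb(dvo, lo_reg, &lo);
	tfp410_readb(dvo, hi_reg, &hi);
	return (uint16_t)hi << 8 | lo;	/* matches the val2, val print order */
}
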
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 26bf0552b3cb..18476bf0b580 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,6 +27,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	{
 		struct drm_gem_object *obj = obj_priv->obj;
 
-		seq_printf(m, " %p: %s %8zd %08x %08x %d %s",
+		seq_printf(m, " %p: %s %8zd %08x %08x %d%s%s",
 			   obj,
 			   get_pin_flag(obj_priv),
 			   obj->size,
 			   obj->read_domains, obj->write_domain,
 			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? "dirty" : "");
+			   obj_priv->dirty ? " dirty" : "",
+			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
 		if (obj->name)
 			seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		seq_printf(m, "Interrupt enable: %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity: %08x\n",
@@ -412,6 +414,109 @@ static int i915_registers_info(struct seq_file *m, void *data) {
 	return 0;
 }
 
+static int
+i915_wedged_open(struct inode *inode,
+		 struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+		 char __user *ubuf,
+		 size_t max,
+		 loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+
+	len = snprintf(buf, sizeof (buf),
+		       "wedged : %d\n",
+		       atomic_read(&dev_priv->mm.wedged));
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+		  const char __user *ubuf,
+		  size_t cnt,
+		  loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 1;
+
+	if (cnt > 0) {
+		if (cnt > sizeof (buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_INFO("Manually setting wedged to %d\n", val);
+
+	atomic_set(&dev_priv->mm.wedged, val);
+	if (val) {
+		DRM_WAKEUP(&dev_priv->irq_queue);
+		queue_work(dev_priv->wq, &dev_priv->error_work);
+	}
+
+	return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_wedged_open,
+	.read = i915_wedged_read,
+	.write = i915_wedged_write,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+	list_add(&node->list, &minor->debugfs_nodes.list);
+
+	return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_wedged",
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  &i915_wedged_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
 
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@ static struct drm_info_list i915_debugfs_list[] = {
 
 int i915_debugfs_init(struct drm_minor *minor)
 {
+	int ret;
+
+	ret = i915_wedged_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
@@ -441,7 +552,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 {
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
-
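
The new i915_wedged node is a plain debugfs file, so the wedged state can be inspected and forced from userspace. A minimal example, assuming debugfs is mounted at /sys/kernel/debug and the device is DRM minor 0 (both assumptions, not guaranteed by the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[80];
	ssize_t n;
	int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_RDWR);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* e.g. "wedged : 0" */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	write(fd, "1", 1);	/* mark the GPU wedged; kicks error_work */
	close(fd);
	return 0;
}
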
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e5b138be45fa..701bfeac7f57 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_NUM_FENCES_AVAIL:
 		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 		break;
+	case I915_PARAM_HAS_OVERLAY:
+		value = dev_priv->overlay ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_PAGEFLIPPING:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	int gtt_offset, gtt_size;
 
 	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IGDNG(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 
 	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
 
 	/* Mask out these reserved bits on this hardware. */
 	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	phys =(entry & PTE_ADDRESS_MASK) |
 	      ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
 
 	return phys;
 }
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (!IS_IGD(dev))
+	if (!IS_PINEVIEW(dev))
 		return;
 
 	tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_iomapfree;
 
-	dev_priv->wq = create_workqueue("i915");
+	dev_priv->wq = create_singlethread_workqueue("i915");
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IGDNG(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	/* Must be done after probing outputs */
-	/* FIXME: verify on IGDNG */
-	if (!IS_IGDNG(dev))
-		intel_opregion_init(dev, 0);
+	intel_opregion_init(dev, 0);
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev)
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/*
+		 * free the memory space allocated for the child device
+		 * config parsed from VBT
+		 */
+		if (dev_priv->child_dev && dev_priv->child_dev_num) {
+			kfree(dev_priv->child_dev);
+			dev_priv->child_dev = NULL;
+			dev_priv->child_dev_num = 0;
+		}
 		drm_irq_uninstall(dev);
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 	}
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
 
-	if (!IS_IGDNG(dev))
-		intel_opregion_free(dev, 0);
+	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 	drm_mm_takedown(&dev_priv->vram);
 	i915_gem_lastclose(dev);
+
+	intel_cleanup_overlay(dev);
 	}
 
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
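
The two new I915_PARAM_* values are reported through the existing GETPARAM ioctl, so userspace can probe for overlay and page-flip support before using the matching ioctls. A sketch, assuming libdrm's i915_drm.h is available:

#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"	/* from libdrm */

static int i915_get_param(int fd, int param, int *value)
{
	struct drm_i915_getparam gp;

	memset(&gp, 0, sizeof(gp));
	gp.param = param;
	gp.value = value;
	return ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
}

/*
 * Usage:
 *	int has_overlay = 0;
 *	if (i915_get_param(fd, I915_PARAM_HAS_OVERLAY, &has_overlay) == 0 &&
 *	    has_overlay)
 *		... DRM_I915_OVERLAY_PUT_IMAGE is available ...
 */
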
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 7f436ec075f6..2fa217862058 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -333,6 +333,7 @@ static struct drm_driver driver = {
 	 .mmap = drm_gem_mmap,
 	 .poll = drm_poll,
 	 .fasync = drm_fasync,
+	 .read = drm_read,
 #ifdef CONFIG_COMPAT
 	 .compat_ioctl = i915_compat_ioctl,
 #endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a725f6591192..fbecac72f5bb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -170,6 +170,8 @@ struct drm_i915_display_funcs {
 	/* clock gating init */
 };
 
+struct intel_overlay;
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -187,6 +189,7 @@ typedef struct drm_i915_private {
 	unsigned int status_gfx_addr;
 	drm_local_map_t hws_map;
 	struct drm_gem_object *hws_obj;
+	struct drm_gem_object *pwrctx;
 
 	struct resource mch_res;
 
@@ -206,11 +209,13 @@ typedef struct drm_i915_private {
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on IGDNG,
+	/** splitted irq regs for graphics and display engine on Ironlake,
 	    irq_mask_reg is still used for display irq. */
 	u32 gt_irq_mask_reg;
 	u32 gt_irq_enable_reg;
 	u32 de_irq_enable_reg;
+	u32 pch_irq_mask_reg;
+	u32 pch_irq_enable_reg;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -240,6 +245,9 @@ typedef struct drm_i915_private {
 
 	struct intel_opregion opregion;
 
+	/* overlay */
+	struct intel_overlay *overlay;
+
 	/* LVDS info */
 	int backlight_duty_cycle;  /* restore backlight to this value */
 	bool panel_wants_dither;
@@ -258,7 +266,7 @@ typedef struct drm_i915_private {
 
 	struct notifier_block lid_notifier;
 
-	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@ typedef struct drm_i915_private {
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
 	u32 saveRENDERSTANDBY;
+	u32 savePWRCTXA;
 	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
@@ -374,8 +383,6 @@ typedef struct drm_i915_private {
 	u32 saveFDI_RXA_IMR;
 	u32 saveFDI_RXB_IMR;
 	u32 saveCACHE_MODE_0;
-	u32 saveD_STATE;
-	u32 saveDSPCLK_GATE_D;
 	u32 saveMI_ARB_STATE;
 	u32 saveSWF0[16];
 	u32 saveSWF1[16];
@@ -539,13 +546,21 @@ typedef struct drm_i915_private {
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
 
+	struct drm_crtc *plane_to_crtc_mapping[2];
+	struct drm_crtc *pipe_to_crtc_mapping[2];
+	wait_queue_head_t pending_flip_queue;
+
 	/* Reclocking support */
 	bool render_reclock_avail;
 	bool lvds_downclock_avail;
+	/* indicates the reduced downclock for LVDS*/
+	int lvds_downclock;
 	struct work_struct idle_work;
 	struct timer_list idle_timer;
 	bool busy;
 	u16 orig_clock;
+	int child_dev_num;
+	struct child_device_config *child_dev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@ struct drm_i915_gem_object {
 	 * Advice: are the backing pages purgeable?
 	 */
 	int madv;
+
+	/**
+	 * Number of crtcs where this object is currently the fb, but
+	 * will be page flipped away on the next vblank. When it
+	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
+	 */
+	atomic_t pending_flip;
 };
 
 /**
@@ -738,6 +760,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
+void intel_enable_asle (struct drm_device *dev);
+
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+			  uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
@@ -824,6 +851,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int intel_opregion_init(struct drm_device *dev, int resume);
 extern void intel_opregion_free(struct drm_device *dev, int suspend);
 extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 #else
 static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
 static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
 static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
@@ -955,8 +985,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
 
 #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		     (dev)->pci_device == 0x2E42 || \
 		     IS_GM45(dev))
 
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
 
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
 			(dev)->pci_device == 0x29B2 || \
 			(dev)->pci_device == 0x29D2 || \
-			(IS_IGD(dev)))
+			(IS_PINEVIEW(dev)))
 
-#define IS_IGDNG_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev) (IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
 		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IGDNG(dev))
+		      IS_IRONLAKE(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_IGD(dev) || IS_IGDNG_M(dev))
+			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
 
 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IGDNG(dev))
+				IS_IRONLAKE(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev) (IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
			  !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
 /* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_IGD(dev) && \
-			   !IS_IGDNG(dev))
+			   !IS_PINEVIEW(dev) && \
+			   !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2a3fa599923..8c463cf2050a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 	list->hash.key = list->file_offset_node->start;
 	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
 		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
 		goto out_free_mm;
 	}
 
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 *
 * Returned sequence numbers are nonzero on success.
 */
-static uint32_t
+uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		 uint32_t flush_domains)
 {
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
-	DRM_DEBUG("%d\n", seqno);
+	DRM_DEBUG_DRIVER("%d\n", seqno);
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 		return -EIO;
 
 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-		if (IS_IGDNG(dev))
+		if (IS_IRONLAKE(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
 		dev_priv->mm.waiting_gem_seqno = seqno;
 		i915_user_irq_get(dev);
-		ret = wait_event_interruptible(dev_priv->irq_queue,
-					       i915_seqno_passed(i915_get_gem_seqno(dev),
-								 seqno) ||
-					       atomic_read(&dev_priv->mm.wedged));
+		if (interruptible)
+			ret = wait_event_interruptible(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+		else
+			wait_event(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+
 		i915_user_irq_put(dev);
 		dev_priv->mm.waiting_gem_seqno = 0;
 
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 	return ret;
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+	return i915_do_wait_request(dev, seqno, 1);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
 	       uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
 #endif
 		BEGIN_LP_RING(2);
 		OUT_RING(cmd);
-		OUT_RING(0); /* noop */
+		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}
 }
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 					    old_write_domain);
 }
 
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+	switch (obj->write_domain) {
+	case I915_GEM_DOMAIN_GTT:
+		i915_gem_object_flush_gtt_write_domain(obj);
+		break;
+	case I915_GEM_DOMAIN_CPU:
+		i915_gem_object_flush_cpu_write_domain(obj);
+		break;
+	default:
+		i915_gem_object_flush_gpu_write_domain(obj);
+		break;
+	}
+}
+
 /**
 * Moves a single object to the GTT read, and possibly write domain.
 *
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
 	return 0;
 }
 
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+			       struct drm_gem_object **object_list,
+			       int count)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	DEFINE_WAIT(wait);
+	int i, ret = 0;
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->pending_flip_queue,
+				&wait, TASK_INTERRUPTIBLE);
+		for (i = 0; i < count; i++) {
+			obj_priv = object_list[i]->driver_private;
+			if (atomic_read(&obj_priv->pending_flip) > 0)
+				break;
+		}
+		if (i == count)
+			break;
+
+		if (!signal_pending(current)) {
+			mutex_unlock(&dev->struct_mutex);
+			schedule();
+			mutex_lock(&dev->struct_mutex);
+			continue;
+		}
+		ret = -ERESTARTSYS;
+		break;
+	}
+	finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+	return ret;
+}
+
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
-	int pin_tries;
+	int pin_tries, flips;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 	/* Copy in the exec list from userland */
-	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
-	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
 		DRM_ERROR("Failed to allocate exec or object list "
 			  "for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
-		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EIO;
 		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
-		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
 		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
+	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 		obj_priv->in_execbuffer = true;
+		flips += atomic_read(&obj_priv->pending_flip);
+	}
+
+	if (flips > 0) {
+		ret = i915_gem_wait_for_pending_flip(dev, object_list,
+						     args->buffer_count);
+		if (ret)
+			goto err;
 	}
 
 	/* Pin and relocate */
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	I915_READ(HWS_PGA); /* posting read */
-	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
 	return 0;
 }
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
 		for (i = 0; i < 8; i++)
 			I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
 	}
-
 	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
 }
 
 /*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
 	ret = copy_from_user(obj_addr, user_data, args->size);
 	if (ret)
 		return -EFAULT;
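
The new i915_gem_wait_for_pending_flip() open-codes its wait with prepare_to_wait()/finish_wait() rather than using wait_event_interruptible() directly, because dev->struct_mutex has to be dropped around schedule(). The same shape in isolation, as a generic sketch with hypothetical names rather than driver code:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Sleep until *count reaches zero, releasing 'lock' while sleeping. */
static int wait_for_zero_locked(wait_queue_head_t *wq, atomic_t *count,
				struct mutex *lock)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (atomic_read(count) == 0)
			break;			/* condition already satisfied */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;	/* let userspace restart the call */
			break;
		}
		mutex_unlock(lock);		/* don't hold the lock asleep */
		schedule();
		mutex_lock(lock);
	}
	finish_wait(wq, &wait);
	return ret;
}
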
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 200e398453ca..30d6af6c09bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 					     0, pcibios_align_resource,
 					     dev_priv->bridge_dev);
 	if (ret) {
-		DRM_DEBUG("failed bus alloc: %d\n", ret);
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
 		goto out;
 	}
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	bool need_disable;
 
-	if (IS_IGDNG(dev)) {
-		/* On IGDNG whatever DRAM config, GPU always do
+	if (IS_IRONLAKE(dev)) {
+		/* On Ironlake whatever DRAM config, GPU always do
 		 * same swizzling setup.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aa7fd82aa6eb..85f4c5de97e2 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -43,10 +43,13 @@
  * we leave them always unmasked in IMR and then control enabling them through
  * PIPESTAT alone.
  */
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT | \
-				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
-				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
-				   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX \
+	(I915_ASLE_INTERRUPT | \
+	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
+	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
+	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
+	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +64,7 @@
 			 DRM_I915_VBLANK_PIPE_B)
 
 void
-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
 		dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static inline void
-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
 		dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 
 /* For display hotplug interrupt */
 void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != 0) {
 		dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != mask) {
 		dev_priv->irq_mask_reg |= mask;
@@ -157,6 +160,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 }
 
 /**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle (struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, DE_GSE);
+	else
+		i915_enable_pipestat(dev_priv, 1,
+				     I915_LEGACY_BLC_EVENT_ENABLE);
+}
+
+/**
  * i915_pipe_enabled - check if a pipe is enabled
  * @dev: DRM device
  * @pipe: pipe to check
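intel_enable_asle() gives both hardware generations a single entry point: on Ironlake the ASLE event arrives as the DE_GSE display-engine interrupt, on earlier parts as the legacy BLC pipestat event on pipe B. A toy dispatch in the same shape (is_ironlake and the two enable helpers here are illustrative, not driver API):

    #include <stdbool.h>
    #include <stdio.h>

    static void enable_display_irq(const char *bit) { printf("DE irq: %s\n", bit); }
    static void enable_pipestat(int pipe, const char *bit)
    {
        printf("pipe %d stat: %s\n", pipe, bit);
    }

    static void enable_asle(bool is_ironlake)
    {
        if (is_ironlake)
            enable_display_irq("DE_GSE");          /* new path */
        else
            enable_pipestat(1, "I915_LEGACY_BLC_EVENT_ENABLE"); /* legacy path */
    }

    int main(void)
    {
        enable_asle(true);
        enable_asle(false);
        return 0;
    }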
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+				 "pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+				 "pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_sysfs_hotplug_event(dev);
 }
 
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
+irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier;
-	u32 new_de_iir, new_gt_iir;
+	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 new_de_iir, new_gt_iir, new_pch_iir;
 	struct drm_i915_master_private *master_priv;
 
 	/* disable master interrupt before clearing iir */
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
+	pch_iir = I915_READ(SDEIIR);
 
 	for (;;) {
-		if (de_iir == 0 && gt_iir == 0)
+		if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
 			break;
 
 		ret = IRQ_HANDLED;
 
+		/* should clear PCH hotplug event before clear CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
+		new_pch_iir = I915_READ(SDEIIR);
+
 		I915_WRITE(DEIIR, de_iir);
 		new_de_iir = I915_READ(DEIIR);
 		I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
 			DRM_WAKEUP(&dev_priv->irq_queue);
 		}
 
+		if (de_iir & DE_GSE)
+			ironlake_opregion_gse_intr(dev);
+
+		/* check event from PCH */
+		if ((de_iir & DE_PCH_EVENT) &&
+		    (pch_iir & SDE_HOTPLUG_MASK)) {
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		}
+
 		de_iir = new_de_iir;
 		gt_iir = new_gt_iir;
+		pch_iir = new_pch_iir;
 	}
 
 	I915_WRITE(DEIER, de_ier);
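The handler above follows the i915 ack-and-reread convention: each IIR is cleared by writing the observed value back, the register is immediately re-read, and the loop exits only once the DE, GT and now PCH IIRs all read zero, so events that latch while earlier ones are being serviced are not dropped. A compact simulation of that loop against a write-1-to-clear status word (read_iir()/write_iir() are stand-ins for I915_READ/I915_WRITE on the three registers):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t latched = 0x5;      /* pretend two events are pending */

    static uint32_t read_iir(void) { return latched; }
    static void write_iir(uint32_t v) { latched &= ~v; } /* write-1-to-clear */

    int main(void)
    {
        uint32_t iir = read_iir(), new_iir;

        for (;;) {
            if (iir == 0)
                break;
            write_iir(iir);         /* ack exactly what we saw */
            new_iir = read_iir();   /* catch anything that raced in */
            printf("handled 0x%x\n", iir);
            iir = new_iir;
        }
        return 0;
    }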
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work)
 	char *reset_event[] = { "RESET=1", NULL };
 	char *reset_done_event[] = { "ERROR=0", NULL };
 
-	DRM_DEBUG("generating error event\n");
+	DRM_DEBUG_DRIVER("generating error event\n");
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		if (IS_I965G(dev)) {
-			DRM_DEBUG("resetting chip\n");
+			DRM_DEBUG_DRIVER("resetting chip\n");
 			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
 			if (!i965_reset(dev, GDRST_RENDER)) {
 				atomic_set(&dev_priv->mm.wedged, 0);
 				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
 			}
 		} else {
-			printk("reboot required\n");
+			DRM_DEBUG_DRIVER("reboot required\n");
 		}
 	}
 }
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
-		DRM_DEBUG("out ot memory, not capturing error state\n");
+		DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
 		goto out;
 	}
 
@@ -512,7 +546,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		printk("i915: Waking up sleeping processes\n");
 		DRM_WAKEUP(&dev_priv->irq_queue);
 	}
 
@@ -535,8 +568,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 	atomic_inc(&dev_priv->irq_received);
 
-	if (IS_IGDNG(dev))
-		return igdng_irq_handler(dev);
+	if (IS_IRONLAKE(dev))
+		return ironlake_irq_handler(dev);
 
 	iir = I915_READ(IIR);
 
@@ -568,14 +601,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		 */
 		if (pipea_stats & 0x8000ffff) {
 			if (pipea_stats & PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG("pipe a underrun\n");
+				DRM_DEBUG_DRIVER("pipe a underrun\n");
 			I915_WRITE(PIPEASTAT, pipea_stats);
 			irq_received = 1;
 		}
 
 		if (pipeb_stats & 0x8000ffff) {
 			if (pipeb_stats & PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG("pipe b underrun\n");
+				DRM_DEBUG_DRIVER("pipe b underrun\n");
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
@@ -591,7 +624,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
-			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 				  hotplug_status);
 			if (hotplug_status & dev_priv->hotplug_supported_mask)
 				queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
-
-			/* EOS interrupts occurs */
-			if (IS_IGD(dev) &&
-				(hotplug_status & CRT_EOS_INT_STATUS)) {
-				u32 temp;
-
-				DRM_DEBUG("EOS interrupt occurs\n");
-				/* status is already cleared */
-				temp = I915_READ(ADPA);
-				temp &= ~ADPA_DAC_ENABLE;
-				I915_WRITE(ADPA, temp);
-
-				temp = I915_READ(PORT_HOTPLUG_EN);
-				temp &= ~CRT_EOS_INT_EN;
-				I915_WRITE(PORT_HOTPLUG_EN, temp);
-
-				temp = I915_READ(PORT_HOTPLUG_STAT);
-				if (temp & CRT_EOS_INT_STATUS)
-					I915_WRITE(PORT_HOTPLUG_STAT,
-						   CRT_EOS_INT_STATUS);
-			}
 		}
 
 		I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
 
+		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 0);
+
+		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 1);
+
 		if (pipea_stats & vblank_status) {
 			vblank++;
 			drm_handle_vblank(dev, 0);
+			intel_finish_page_flip(dev, 0);
 		}
 
 		if (pipeb_stats & vblank_status) {
 			vblank++;
 			drm_handle_vblank(dev, 1);
+			intel_finish_page_flip(dev, 1);
 		}
 
 		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
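In the legacy handler the flip is a two-phase handshake: the plane's flip-pending interrupt marks the flip as queued (intel_prepare_page_flip) and the next vblank on that pipe retires it (intel_finish_page_flip). A minimal state machine with the same pairing (the names here are illustrative, not the driver's):

    #include <stdio.h>

    enum flip_state { FLIP_IDLE, FLIP_PENDING };

    static enum flip_state pipe_state[2];

    static void prepare_page_flip(int plane) { pipe_state[plane] = FLIP_PENDING; }

    static void finish_page_flip(int pipe)
    {
        if (pipe_state[pipe] == FLIP_PENDING) {
            pipe_state[pipe] = FLIP_IDLE;
            printf("pipe %d: flip completed at vblank\n", pipe);
        }
    }

    int main(void)
    {
        prepare_page_flip(0);   /* flip-pending interrupt on plane A */
        finish_page_flip(0);    /* next vblank on pipe A retires it */
        finish_page_flip(1);    /* vblank with no flip queued: no-op */
        return 0;
    }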
@@ -684,7 +704,7 @@ static int i915_emit_irq(struct drm_device * dev)
 
 	i915_kernel_lost_context(dev);
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_DRIVER("\n");
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@ void i915_user_irq_get(struct drm_device *dev)
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-		if (IS_IGDNG(dev))
-			igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+		if (IS_IRONLAKE(dev))
+			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -725,8 +745,8 @@ void i915_user_irq_put(struct drm_device *dev)
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-		if (IS_IGDNG(dev))
-			igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+		if (IS_IRONLAKE(dev))
+			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -749,7 +769,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
 
-	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return 0;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		opregion_enable_asle(dev);
 	dev_priv->irq_enabled = 1;
 }
@@ -976,7 +996,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 
 /* drm_dma.h hooks
 */
-static void igdng_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -992,14 +1012,21 @@ static void igdng_irq_preinstall(struct drm_device *dev)
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
 	(void) I915_READ(GTIER);
+
+	/* south display irq */
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	(void) I915_READ(SDEIER);
 }
 
-static int igdng_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
 	u32 render_mask = GT_USER_INTERRUPT;
+	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
 	dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
 	(void) I915_READ(GTIER);
 
+	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+	dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
+	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+	(void) I915_READ(SDEIER);
+
 	return 0;
 }
 
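The SDE programming above follows the usual i915 bring-up order: ack stale events by writing SDEIIR back to itself, set SDEIMR to the complement of the wanted sources, then enable them in SDEIER. The same convention with plain variables standing in for the registers (values invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t hotplug_mask = 0xf << 8;   /* SDE_HOTPLUG_MASK-style value */
        uint32_t sdeiir = 0x300;            /* pretend stale events are latched */
        uint32_t sdeimr, sdeier;

        sdeiir &= ~sdeiir;      /* I915_WRITE(SDEIIR, I915_READ(SDEIIR)): W1C acks all */
        sdeimr = ~hotplug_mask; /* mask everything we do not want */
        sdeier = hotplug_mask;  /* enable only the hotplug sources */

        printf("IIR=0x%x IMR=0x%08x IER=0x%x\n", sdeiir, sdeimr, sdeier);
        return 0;
    }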
@@ -1031,8 +1066,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 
-	if (IS_IGDNG(dev)) {
-		igdng_irq_preinstall(dev);
+	if (IS_IRONLAKE(dev)) {
+		ironlake_irq_preinstall(dev);
 		return;
 	}
 
@@ -1059,8 +1094,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
-	if (IS_IGDNG(dev))
-		return igdng_irq_postinstall(dev);
+	if (IS_IRONLAKE(dev))
+		return ironlake_irq_postinstall(dev);
 
 	/* Unmask the interrupts that we always want on. */
 	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
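The split above is deliberate: bits in I915_INTERRUPT_ENABLE_FIX stay permanently unmasked in IMR and are gated through IER/PIPESTAT instead, while I915_INTERRUPT_ENABLE_VAR bits are flipped in IMR at runtime by i915_enable_irq()/i915_disable_irq(). A small demonstration of that fixed-versus-variable masking (the bit values are invented for the example):

    #include <stdint.h>
    #include <stdio.h>

    #define FIX (0x3 << 4)  /* always-on sources, illustrative values */
    #define VAR (0x1 << 1)  /* runtime-toggled user interrupt */

    int main(void)
    {
        uint32_t imr = ~FIX;        /* FIX bits permanently unmasked */
        uint32_t ier = FIX | VAR;   /* everything we may ever want */

        imr &= ~VAR;    /* i915_enable_irq(): unmask the variable bit */
        printf("IMR=0x%08x IER=0x%x\n", imr, ier);
        imr |= VAR;     /* i915_disable_irq(): mask it again */
        printf("IMR=0x%08x\n", imr);
        return 0;
    }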
@@ -1120,7 +1155,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static void igdng_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 
 	dev_priv->vblank_pipe = 0;
 
-	if (IS_IGDNG(dev)) {
-		igdng_irq_uninstall(dev);
+	if (IS_IRONLAKE(dev)) {
+		ironlake_irq_uninstall(dev);
 		return;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 2d5193556d3f..7cc8410239cb 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -118,6 +118,10 @@ struct opregion_asle {
 #define ASLE_BACKLIGHT_FAIL (2<<12)
 #define ASLE_PFIT_FAIL (2<<14)
 #define ASLE_PWM_FREQ_FAIL (2<<16)
+#define ASLE_ALS_ILLUM_FAILED (1<<10)
+#define ASLE_BACKLIGHT_FAILED (1<<12)
+#define ASLE_PFIT_FAILED (1<<14)
+#define ASLE_PWM_FREQ_FAILED (1<<16)
 
 /* ASLE backlight brightness to set */
 #define ASLE_BCLP_VALID (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 	if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
 		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
 	else {
-		if (IS_IGD(dev)) {
+		if (IS_PINEVIEW(dev)) {
 			blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
 			max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >>
 					BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
 	asle_req = asle->aslc & ASLE_REQ_MSK;
 
 	if (!asle_req) {
-		DRM_DEBUG("non asle set request??\n");
+		DRM_DEBUG_DRIVER("non asle set request??\n");
 		return;
 	}
 
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
 	asle->aslc = asle_stat;
 }
 
+static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 cpu_pwm_ctl, pch_pwm_ctl2;
+	u32 max_backlight, level;
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAILED;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp < 0 || bclp > 255)
+		return ASLE_BACKLIGHT_FAILED;
+
+	cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
+	pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	/* get the max PWM frequency */
+	max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
+	/* calculate the expected PMW frequency */
+	level = (bclp * max_backlight) / 255;
+	/* reserve the high 16 bits */
+	cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
+	/* write the updated PWM frequency */
+	I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
+
+	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+	return 0;
+}
+
+void ironlake_opregion_gse_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = asle->aslc & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG_DRIVER("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM) {
+		DRM_DEBUG_DRIVER("Illum is not supported\n");
+		asle_stat |= ASLE_ALS_ILLUM_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+
+	if (asle_req & ASLE_SET_PFIT) {
+		DRM_DEBUG_DRIVER("Pfit is not supported\n");
+		asle_stat |= ASLE_PFIT_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_PWM_FREQ) {
+		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+		asle_stat |= ASLE_PWM_FREQ_FAILED;
+	}
+
+	asle->aslc = asle_stat;
+}
 #define ASLE_ALS_EN (1<<0)
 #define ASLE_BLC_EN (1<<1)
 #define ASLE_PFIT_EN (1<<2)
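The arithmetic in asle_set_backlight_ironlake() maps the 0-255 bclp request linearly onto the PCH's maximum duty-cycle value and reports a 0-100 percentage back through cblv. Worked through for a request of 128 against a hypothetical maximum of 0x1000: level = 128 * 0x1000 / 255 = 0x808, and cblv = 128 * 0x64 / 0xff = 50. A self-contained check of both formulas:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t bclp = 128;             /* requested brightness, 0..255 */
        uint32_t max_backlight = 0x1000; /* hypothetical PCH max duty cycle */

        uint32_t level = (bclp * max_backlight) / 255; /* PWM duty cycle */
        uint32_t cblv = (bclp * 0x64) / 0xff;          /* percentage echoed back */

        printf("level = 0x%x, cblv = %u%%\n", level, cblv); /* 0x808, 50% */
        return 0;
    }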
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
 		unsigned long irqflags;
 
 		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-		i915_enable_pipestat(dev_priv, 1,
-				     I915_LEGACY_BLC_EVENT_ENABLE);
+		intel_enable_asle(dev);
 		spin_unlock_irqrestore(&dev_priv->user_irq_lock,
 				       irqflags);
 	}
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 	int err = 0;
 
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
-	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
-		DRM_DEBUG("ACPI OpRegion not supported!\n");
+		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
 		return -ENOTSUPP;
 	}
 
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 
 	opregion->header = base;
 	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
-		DRM_DEBUG("opregion signature mismatch\n");
+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
 		err = -EINVAL;
 		goto err_out;
 	}
 
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
-		DRM_DEBUG("Public ACPI methods supported\n");
+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
 		if (drm_core_check_feature(dev, DRIVER_MODESET))
 			intel_didl_outputs(dev);
 	} else {
-		DRM_DEBUG("Public ACPI methods not supported\n");
+		DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
 		err = -ENOTSUPP;
 		goto err_out;
 	}
 	opregion->enabled = 1;
 
 	if (mboxes & MBOX_SWSCI) {
-		DRM_DEBUG("SWSCI supported\n");
+		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
 	}
 	if (mboxes & MBOX_ASLE) {
-		DRM_DEBUG("ASLE supported\n");
+		DRM_DEBUG_DRIVER("ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
 		opregion_enable_asle(dev);
 	}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1687edf68795..974b3cf70618 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -140,6 +140,7 @@
 #define MI_NOOP MI_INSTR(0, 0)
 #define MI_USER_INTERRUPT MI_INSTR(0x02, 0)
 #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0)
+#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16)
 #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
 #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
 #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
 #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
 #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0)
 #define MI_REPORT_HEAD MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP MI_INSTR(0x11,0)
+#define MI_OVERLAY_CONTINUE (0x0<<21)
+#define MI_OVERLAY_ON (0x1<<21)
+#define MI_OVERLAY_OFF (0x2<<21)
 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
+#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
 #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
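MI_DISPLAY_FLIP is MI_INSTR(0x14, 2), i.e. opcode 0x14 with a two-extra-dword length field, and MI_DISPLAY_FLIP_PLANE(n) selects the target plane at bit 20. Assuming this header's usual MI_INSTR encoding of ((opcode) << 23 | (flags)), the first dword of a plane-B flip packet works out like this (a sketch of the header dword only; the driver emits the base address and pitch in the following dwords):

    #include <stdint.h>
    #include <stdio.h>

    #define MI_INSTR(opcode, flags)  (((opcode) << 23) | (flags))
    #define MI_DISPLAY_FLIP          MI_INSTR(0x14, 2)
    #define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)

    int main(void)
    {
        /* header dword for "flip plane B" */
        uint32_t header = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(1);

        printf("0x%08x\n", header);   /* prints 0x0a100002 */
        return 0;
    }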
@@ -260,6 +267,8 @@
 #define HWS_PGA 0x02080
 #define HWS_ADDRESS_MASK 0xfffff000
 #define HWS_START_ADDRESS_SHIFT 4
+#define PWRCTXA 0x2088 /* 965GM+ only */
+#define PWRCTX_EN (1<<0)
 #define IPEIR 0x02088
 #define IPEHR 0x0208c
 #define INSTDONE 0x02090
@@ -405,6 +414,13 @@
 # define GPIO_DATA_VAL_IN (1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE (1 << 13)
 
+#define GMBUS0 0x5100
+#define GMBUS1 0x5104
+#define GMBUS2 0x5108
+#define GMBUS3 0x510c
+#define GMBUS4 0x5110
+#define GMBUS5 0x5120
+
 /*
  * Clock control & power management
  */
@@ -435,7 +451,7 @@
 #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
 #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
 #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
-#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
+#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
 
 #define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
 #define I915_CRC_ERROR_ENABLE (1UL<<29)
@@ -512,7 +528,7 @@
  */
 #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
 #define DPLL_FPA01_P1_POST_DIV_SHIFT 16
-#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+#define DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
 /* i830, required in DVO non-gang */
 #define PLL_P2_DIVIDE_BY_4 (1 << 23)
 #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@@ -522,7 +538,7 @@
 #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
 #define PLL_REF_INPUT_MASK (3 << 13)
 #define PLL_LOAD_PULSE_PHASE_SHIFT 9
-/* IGDNG */
+/* Ironlake */
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
 # define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
@@ -586,12 +602,12 @@
 #define FPB0 0x06048
 #define FPB1 0x0604c
 #define FP_N_DIV_MASK 0x003f0000
-#define FP_N_IGD_DIV_MASK 0x00ff0000
+#define FP_N_PINEVIEW_DIV_MASK 0x00ff0000
 #define FP_N_DIV_SHIFT 16
 #define FP_M1_DIV_MASK 0x00003f00
 #define FP_M1_DIV_SHIFT 8
 #define FP_M2_DIV_MASK 0x0000003f
-#define FP_M2_IGD_DIV_MASK 0x000000ff
+#define FP_M2_PINEVIEW_DIV_MASK 0x000000ff
 #define FP_M2_DIV_SHIFT 0
 #define DPLL_TEST 0x606c
 #define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@@ -769,7 +785,8 @@
 
 /** GM965 GM45 render standby register */
 #define MCHBAR_RENDER_STANDBY 0x111B8
-
+#define RCX_SW_EXIT (1<<23)
+#define RSX_STATUS_MASK 0x00700000
 #define PEG_BAND_GAP_DATA 0x14d68
 
 /*
@@ -844,7 +861,6 @@
 #define SDVOB_HOTPLUG_INT_EN (1 << 26)
 #define SDVOC_HOTPLUG_INT_EN (1 << 25)
 #define TV_HOTPLUG_INT_EN (1 << 18)
-#define CRT_EOS_INT_EN (1 << 10)
 #define CRT_HOTPLUG_INT_EN (1 << 9)
 #define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
 #define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
@@ -868,7 +884,6 @@
 			 HDMID_HOTPLUG_INT_EN | \
 			 SDVOB_HOTPLUG_INT_EN | \
 			 SDVOC_HOTPLUG_INT_EN | \
-			 TV_HOTPLUG_INT_EN | \
 			 CRT_HOTPLUG_INT_EN)
 
 
@@ -879,7 +894,6 @@
 #define DPC_HOTPLUG_INT_STATUS (1 << 28)
 #define HDMID_HOTPLUG_INT_STATUS (1 << 27)
 #define DPD_HOTPLUG_INT_STATUS (1 << 27)
-#define CRT_EOS_INT_STATUS (1 << 12)
 #define CRT_HOTPLUG_INT_STATUS (1 << 11)
 #define TV_HOTPLUG_INT_STATUS (1 << 10)
 #define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -1620,7 +1634,7 @@
 #define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
 
 #define DP_SCRAMBLING_DISABLE (1 << 12)
-#define DP_SCRAMBLING_DISABLE_IGDNG (1 << 7)
+#define DP_SCRAMBLING_DISABLE_IRONLAKE (1 << 7)
 
 /** limit RGB values to avoid confusing TVs */
 #define DP_COLOR_RANGE_16_235 (1 << 8)
@@ -1808,7 +1822,7 @@
 #define DSPFW3 0x7003c
 #define DSPFW_HPLL_SR_EN (1<<31)
 #define DSPFW_CURSOR_SR_SHIFT 24
-#define IGD_SELF_REFRESH_EN (1<<30)
+#define PINEVIEW_SELF_REFRESH_EN (1<<30)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE 64
@@ -1824,16 +1838,16 @@
 #define G4X_MAX_WM 0x3f
 #define I915_MAX_WM 0x3f
 
-#define IGD_DISPLAY_FIFO 512 /* in 64byte unit */
-#define IGD_FIFO_LINE_SIZE 64
-#define IGD_MAX_WM 0x1ff
-#define IGD_DFT_WM 0x3f
-#define IGD_DFT_HPLLOFF_WM 0
-#define IGD_GUARD_WM 10
-#define IGD_CURSOR_FIFO 64
-#define IGD_CURSOR_MAX_WM 0x3f
-#define IGD_CURSOR_DFT_WM 0
-#define IGD_CURSOR_GUARD_WM 5
+#define PINEVIEW_DISPLAY_FIFO 512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE 64
+#define PINEVIEW_MAX_WM 0x1ff
+#define PINEVIEW_DFT_WM 0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM 0
+#define PINEVIEW_GUARD_WM 10
+#define PINEVIEW_CURSOR_FIFO 64
+#define PINEVIEW_CURSOR_MAX_WM 0x3f
+#define PINEVIEW_CURSOR_DFT_WM 0
+#define PINEVIEW_CURSOR_GUARD_WM 5
 
 /*
  * The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
 #define DISPPLANE_16BPP (0x5<<26)
 #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
 #define DISPPLANE_32BPP (0x7<<26)
+#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
 #define DISPPLANE_STEREO_ENABLE (1<<25)
 #define DISPPLANE_STEREO_DISABLE 0
 #define DISPPLANE_SEL_PIPE_MASK (1<<24)
@@ -1918,7 +1933,7 @@
 #define DISPPLANE_NO_LINE_DOUBLE 0
 #define DISPPLANE_STEREO_POLARITY_FIRST 0
 #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
-#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* IGDNG */
+#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
 #define DISPPLANE_TILED (1<<10)
 #define DSPAADDR 0x70184
 #define DSPASTRIDE 0x70188
@@ -1971,7 +1986,7 @@
 # define VGA_2X_MODE (1 << 30)
 # define VGA_PIPE_B_SELECT (1 << 29)
 
-/* IGDNG */
+/* Ironlake */
 
 #define CPU_VGACNTRL 0x41000
 
@@ -2117,6 +2132,7 @@
 #define SDE_PORTC_HOTPLUG (1 << 9)
 #define SDE_PORTB_HOTPLUG (1 << 8)
 #define SDE_SDVOB_HOTPLUG (1 << 6)
+#define SDE_HOTPLUG_MASK (0xf << 8)
 
 #define SDEISR 0xc4000
 #define SDEIMR 0xc4004
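SDE_HOTPLUG_MASK is 0xf << 8, i.e. bits 8 through 11, the union of the four per-port hotplug bits that ironlake_irq_postinstall() enables. Port B and C are visibly bits 8 and 9 above; the CRT and port D positions below are assumed from that postinstall hunk. A quick static check of the equivalence:

    #include <assert.h>
    #include <stdio.h>

    #define SDE_CRT_HOTPLUG   (1 << 11) /* assumed bit positions; only */
    #define SDE_PORTD_HOTPLUG (1 << 10) /* PORTC/PORTB appear above    */
    #define SDE_PORTC_HOTPLUG (1 << 9)
    #define SDE_PORTB_HOTPLUG (1 << 8)
    #define SDE_HOTPLUG_MASK  (0xf << 8)

    int main(void)
    {
        assert(SDE_HOTPLUG_MASK == (SDE_CRT_HOTPLUG | SDE_PORTD_HOTPLUG |
                                    SDE_PORTC_HOTPLUG | SDE_PORTB_HOTPLUG));
        printf("mask = 0x%x\n", SDE_HOTPLUG_MASK);   /* 0xf00 */
        return 0;
    }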
@@ -2157,6 +2173,13 @@
 #define PCH_GPIOE 0xc5020
 #define PCH_GPIOF 0xc5024
 
+#define PCH_GMBUS0 0xc5100
+#define PCH_GMBUS1 0xc5104
+#define PCH_GMBUS2 0xc5108
+#define PCH_GMBUS3 0xc510c
+#define PCH_GMBUS4 0xc5110
+#define PCH_GMBUS5 0xc5120
+
 #define PCH_DPLL_A 0xc6014
 #define PCH_DPLL_B 0xc6018
 
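The PCH copies of the GMBUS registers sit at the legacy offsets plus 0xc0000 (0x5100 becomes 0xc5100, and so on), which appears to be the blanket relocation of display registers into the south display engine's range. A one-line sanity check (PCH_DISPLAY_BASE is an illustrative name for the offset, not a define from this header):

    #include <assert.h>

    #define GMBUS0           0x5100
    #define PCH_GMBUS0       0xc5100
    #define PCH_DISPLAY_BASE 0xc0000  /* illustrative name for the offset */

    int main(void)
    {
        assert(PCH_GMBUS0 == GMBUS0 + PCH_DISPLAY_BASE);
        return 0;
    }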
@@ -2292,7 +2315,7 @@
 #define FDI_DP_PORT_WIDTH_X3 (2<<19)
 #define FDI_DP_PORT_WIDTH_X4 (3<<19)
 #define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
-/* IGDNG: hardwired to 1 */
+/* Ironlake: hardwired to 1 */
 #define FDI_TX_PLL_ENABLE (1<<14)
 /* both Tx and Rx */
 #define FDI_SCRAMBLING_ENABLE (0<<7)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 6eec8171a44e..d5ebb00a9d49 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -27,14 +27,14 @@
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
-#include "i915_drv.h"
+#include "intel_drv.h"
 
 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 dpll_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
 	} else {
 		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
 	}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	/* Pipe & plane A info */
 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
 		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
 		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		dev_priv->saveFPA1 = I915_READ(FPA1);
 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
 	}
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
 	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
 	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
 		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
 		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	/* Pipe & plane B info */
 	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
 	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
 		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
 		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		dev_priv->saveFPB1 = I915_READ(FPB1);
 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
 	}
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
 	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
 	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
 		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
 		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dpll_a_reg = PCH_DPLL_A;
 		dpll_b_reg = PCH_DPLL_B;
 		fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 		fpb1_reg = FPB1;
 	}
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
 		I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
 	}
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	/* Actually enable it */
 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
 	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
 	DRM_UDELAY(150);
 
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
 	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
 	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
 		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
 		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	/* Actually enable it */
 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
 	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
 	DRM_UDELAY(150);
 
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
 	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
 	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
 		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
 		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
 	dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
 	/* CRT state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
 	} else {
 		dev_priv->saveADPA = I915_READ(ADPA);
 	}
 
 	/* LVDS state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
 		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
 		dev_priv->saveLVDS = I915_READ(LVDS);
 	}
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
 	dev_priv->saveVGA0 = I915_READ(VGA0);
 	dev_priv->saveVGA1 = I915_READ(VGA1);
 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
 	else
 		dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
 	I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
 	/* CRT state */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
 	else
 		I915_WRITE(ADPA, dev_priv->saveADPA);
 
 	/* LVDS state */
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
 	} else if (IS_MOBILE(dev) && !IS_I830(dev))
 		I915_WRITE(LVDS, dev_priv->saveLVDS);
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
 		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
 	}
 
 	/* VGA state */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
 	else
 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev)
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
 	/* Render Standby */
-	if (IS_I965G(dev) && IS_MOBILE(dev))
+	if (I915_HAS_RC6(dev)) {
 		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+		dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
+	}
 
 	/* Hardware status page */
 	dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev)
 	i915_save_display(dev);
 
 	/* Interrupt state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveDEIER = I915_READ(DEIER);
 		dev_priv->saveDEIMR = I915_READ(DEIMR);
 		dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@ int i915_save_state(struct drm_device *dev)
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	/* Clock gating state */
-	dev_priv->saveD_STATE = I915_READ(D_STATE);
-	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
-
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
@@ -796,8 +794,10 @@ int i915_restore_state(struct drm_device *dev)
796 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 794 pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
797 795
798 /* Render Standby */ 796 /* Render Standby */
799 if (IS_I965G(dev) && IS_MOBILE(dev)) 797 if (I915_HAS_RC6(dev)) {
800 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY); 798 I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
799 I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
800 }
801 801
802 /* Hardware status page */ 802 /* Hardware status page */
803 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 803 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@ int i915_restore_state(struct drm_device *dev)
817 i915_restore_display(dev); 817 i915_restore_display(dev);
818 818
819 /* Interrupt state */ 819 /* Interrupt state */
820 if (IS_IGDNG(dev)) { 820 if (IS_IRONLAKE(dev)) {
821 I915_WRITE(DEIER, dev_priv->saveDEIER); 821 I915_WRITE(DEIER, dev_priv->saveDEIER);
822 I915_WRITE(DEIMR, dev_priv->saveDEIMR); 822 I915_WRITE(DEIMR, dev_priv->saveDEIMR);
823 I915_WRITE(GTIER, dev_priv->saveGTIER); 823 I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@ int i915_restore_state(struct drm_device *dev)
830 } 830 }
831 831
832 /* Clock gating state */ 832 /* Clock gating state */
833 I915_WRITE (D_STATE, dev_priv->saveD_STATE); 833 intel_init_clock_gating(dev);
834 I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
835 834
836 /* Cache mode state */ 835 /* Cache mode state */
837 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 836 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
846 for (i = 0; i < 3; i++) 845 for (i = 0; i < 3; i++)
847 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 846 I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
848 847
848 /* I2C state */
849 intel_i2c_reset_gmbus(dev);
850
849 return 0; 851 return 0;
850} 852}
851 853
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 96cd256e60e6..f27567747580 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
114 struct lvds_dvo_timing *dvo_timing; 114 struct lvds_dvo_timing *dvo_timing;
115 struct drm_display_mode *panel_fixed_mode; 115 struct drm_display_mode *panel_fixed_mode;
116 int lfp_data_size, dvo_timing_offset; 116 int lfp_data_size, dvo_timing_offset;
117 int i, temp_downclock;
118 struct drm_display_mode *temp_mode;
117 119
118 /* Defaults if we can't find VBT info */ 120 /* Defaults if we can't find VBT info */
119 dev_priv->lvds_dither = 0; 121 dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
159 161
160 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 162 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
161 163
162 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 164 DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
163 drm_mode_debug_printmodeline(panel_fixed_mode); 165 drm_mode_debug_printmodeline(panel_fixed_mode);
164 166
167 temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
168 temp_downclock = panel_fixed_mode->clock;
169 /*
170	 * Enumerate the LVDS panel timing entries in the VBT to check
171	 * whether a lower LVDS downclock is available.
172 */
173 for (i = 0; i < 16; i++) {
174 entry = (struct bdb_lvds_lfp_data_entry *)
175 ((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
176 dvo_timing = (struct lvds_dvo_timing *)
177 ((unsigned char *)entry + dvo_timing_offset);
178
179 fill_detail_timing_data(temp_mode, dvo_timing);
180
181 if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
182 temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
183 temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
184 temp_mode->htotal == panel_fixed_mode->htotal &&
185 temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
186 temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
187 temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
188 temp_mode->vtotal == panel_fixed_mode->vtotal &&
189 temp_mode->clock < temp_downclock) {
190 /*
191	 * A downclock candidate was already found, but keep
192	 * looking for an even lower one.
193 */
194 temp_downclock = temp_mode->clock;
195 }
196	 /* reset temp_mode for the next iteration */
197 memset(temp_mode, 0, sizeof(*temp_mode));
198 }
199 kfree(temp_mode);
200 if (temp_downclock < panel_fixed_mode->clock) {
201 dev_priv->lvds_downclock_avail = 1;
202 dev_priv->lvds_downclock = temp_downclock;
203	 DRM_DEBUG_KMS("LVDS downclock is found in VBT. "
204	 "Normal Clock %dKHz, downclock %dKHz\n",
205	 panel_fixed_mode->clock, temp_downclock);
206 }
165 return; 207 return;
166} 208}
167 209
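[Editor's sketch, not part of the patch: the eight-field comparison in the loop above is simply a timing-equality test combined with a strictly lower clock. With a hypothetical helper it reduces to the following.]

	/* Hypothetical helper: true when two modes share identical timings */
	static bool lvds_timings_match(const struct drm_display_mode *a,
				       const struct drm_display_mode *b)
	{
		return a->hdisplay == b->hdisplay &&
		       a->hsync_start == b->hsync_start &&
		       a->hsync_end == b->hsync_end &&
		       a->htotal == b->htotal &&
		       a->vdisplay == b->vdisplay &&
		       a->vsync_start == b->vsync_start &&
		       a->vsync_end == b->vsync_end &&
		       a->vtotal == b->vtotal;
	}

	/* The loop body then keeps the lowest clock among matching entries */
	if (lvds_timings_match(temp_mode, panel_fixed_mode) &&
	    temp_mode->clock < temp_downclock)
		temp_downclock = temp_mode->clock;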
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
217 if (IS_I85X(dev_priv->dev)) 259 if (IS_I85X(dev_priv->dev))
218 dev_priv->lvds_ssc_freq = 260 dev_priv->lvds_ssc_freq =
219 general->ssc_freq ? 66 : 48; 261 general->ssc_freq ? 66 : 48;
220 else if (IS_IGDNG(dev_priv->dev)) 262 else if (IS_IRONLAKE(dev_priv->dev))
221 dev_priv->lvds_ssc_freq = 263 dev_priv->lvds_ssc_freq =
222 general->ssc_freq ? 100 : 120; 264 general->ssc_freq ? 100 : 120;
223 else 265 else
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
241 GPIOF, 283 GPIOF,
242 }; 284 };
243 285
244 /* Set sensible defaults in case we can't find the general block
245 or it is the wrong chipset */
246 dev_priv->crt_ddc_bus = -1;
247
248 general = find_section(bdb, BDB_GENERAL_DEFINITIONS); 286 general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
249 if (general) { 287 if (general) {
250 u16 block_size = get_blocksize(general); 288 u16 block_size = get_blocksize(general);
251 if (block_size >= sizeof(*general)) { 289 if (block_size >= sizeof(*general)) {
252 int bus_pin = general->crt_ddc_gmbus_pin; 290 int bus_pin = general->crt_ddc_gmbus_pin;
253 DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin); 291 DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
254 if ((bus_pin >= 1) && (bus_pin <= 6)) { 292 if ((bus_pin >= 1) && (bus_pin <= 6)) {
255 dev_priv->crt_ddc_bus = 293 dev_priv->crt_ddc_bus =
256 crt_bus_map_table[bus_pin-1]; 294 crt_bus_map_table[bus_pin-1];
257 } 295 }
258 } else { 296 } else {
259 DRM_DEBUG("BDB_GD too small (%d). Invalid.\n", 297 DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
260 block_size); 298 block_size);
261 } 299 }
262 } 300 }
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
274 312
275 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 313 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
276 if (!p_defs) { 314 if (!p_defs) {
277 DRM_DEBUG("No general definition block is found\n"); 315 DRM_DEBUG_KMS("No general definition block is found\n");
278 return; 316 return;
279 } 317 }
280 /* judge whether the size of child device meets the requirements. 318 /* judge whether the size of child device meets the requirements.
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
284 */ 322 */
285 if (p_defs->child_dev_size != sizeof(*p_child)) { 323 if (p_defs->child_dev_size != sizeof(*p_child)) {
286 /* different child dev size . Ignore it */ 324 /* different child dev size . Ignore it */
287 DRM_DEBUG("different child size is found. Invalid.\n"); 325 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
288 return; 326 return;
289 } 327 }
290 /* get the block size of general definitions */ 328 /* get the block size of general definitions */
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
310 if (p_child->dvo_port != DEVICE_PORT_DVOB && 348 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
311 p_child->dvo_port != DEVICE_PORT_DVOC) { 349 p_child->dvo_port != DEVICE_PORT_DVOC) {
312 /* skip the incorrect SDVO port */ 350 /* skip the incorrect SDVO port */
313 DRM_DEBUG("Incorrect SDVO port. Skip it \n"); 351 DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
314 continue; 352 continue;
315 } 353 }
316 DRM_DEBUG("the SDVO device with slave addr %2x is found on " 354 DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
317 "%s port\n", 355 " %s port\n",
318 p_child->slave_addr, 356 p_child->slave_addr,
319 (p_child->dvo_port == DEVICE_PORT_DVOB) ? 357 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
320 "SDVOB" : "SDVOC"); 358 "SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
325 p_mapping->dvo_wiring = p_child->dvo_wiring; 363 p_mapping->dvo_wiring = p_child->dvo_wiring;
326 p_mapping->initialized = 1; 364 p_mapping->initialized = 1;
327 } else { 365 } else {
328 DRM_DEBUG("Maybe one SDVO port is shared by " 366 DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
329 "two SDVO device.\n"); 367 "two SDVO device.\n");
330 } 368 }
331 if (p_child->slave2_addr) { 369 if (p_child->slave2_addr) {
332 /* Maybe this is a SDVO device with multiple inputs */ 370 /* Maybe this is a SDVO device with multiple inputs */
333 /* And the mapping info is not added */ 371 /* And the mapping info is not added */
334 DRM_DEBUG("there exists the slave2_addr. Maybe this " 372 DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
335 "is a SDVO device with multiple inputs.\n"); 373 " is a SDVO device with multiple inputs.\n");
336 } 374 }
337 count++; 375 count++;
338 } 376 }
339 377
340 if (!count) { 378 if (!count) {
341 /* No SDVO device info is found */ 379 /* No SDVO device info is found */
342 DRM_DEBUG("No SDVO device info is found in VBT\n"); 380 DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
343 } 381 }
344 return; 382 return;
345} 383}
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv,
366 dev_priv->render_reclock_avail = true; 404 dev_priv->render_reclock_avail = true;
367} 405}
368 406
407static void
408parse_device_mapping(struct drm_i915_private *dev_priv,
409 struct bdb_header *bdb)
410{
411 struct bdb_general_definitions *p_defs;
412 struct child_device_config *p_child, *child_dev_ptr;
413 int i, child_device_num, count;
414 u16 block_size;
415
416 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
417 if (!p_defs) {
418 DRM_DEBUG_KMS("No general definition block is found\n");
419 return;
420 }
421	 /* Check whether the child device size meets expectations.
422	 * If the size obtained from the general definition block
423	 * differs from sizeof(struct child_device_config), skip
424	 * parsing the child device info.
425 */
426 if (p_defs->child_dev_size != sizeof(*p_child)) {
427	 /* different child dev size. Ignore it. */
428 DRM_DEBUG_KMS("different child size is found. Invalid.\n");
429 return;
430 }
431 /* get the block size of general definitions */
432 block_size = get_blocksize(p_defs);
433 /* get the number of child device */
434 child_device_num = (block_size - sizeof(*p_defs)) /
435 sizeof(*p_child);
436 count = 0;
437	 /* count the child devices that are present */
438 for (i = 0; i < child_device_num; i++) {
439 p_child = &(p_defs->devices[i]);
440 if (!p_child->device_type) {
441 /* skip the device block if device type is invalid */
442 continue;
443 }
444 count++;
445 }
446 if (!count) {
447 DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
448 return;
449 }
450 dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
451 if (!dev_priv->child_dev) {
452 DRM_DEBUG_KMS("No memory space for child device\n");
453 return;
454 }
455
456 dev_priv->child_dev_num = count;
457 count = 0;
458 for (i = 0; i < child_device_num; i++) {
459 p_child = &(p_defs->devices[i]);
460 if (!p_child->device_type) {
461 /* skip the device block if device type is invalid */
462 continue;
463 }
464 child_dev_ptr = dev_priv->child_dev + count;
465 count++;
466 memcpy((void *)child_dev_ptr, (void *)p_child,
467 sizeof(*p_child));
468 }
469 return;
470}
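[Editor's sketch, not part of the patch: once parse_device_mapping() has populated dev_priv->child_dev, later output-setup code can walk the copied array. The HDMI check below is purely illustrative, using the DEVICE_TYPE_HDMI define added to intel_bios.h further down in this patch.]

	/* Illustrative consumer of the copied child-device table */
	int i;
	for (i = 0; i < dev_priv->child_dev_num; i++) {
		struct child_device_config *child = dev_priv->child_dev + i;

		if (child->device_type == DEVICE_TYPE_HDMI)
			DRM_DEBUG_KMS("HDMI child device on DVO port %d\n",
				      child->dvo_port);
	}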
369/** 471/**
370 * intel_init_bios - initialize VBIOS settings & find VBT 472 * intel_init_bios - initialize VBIOS settings & find VBT
371 * @dev: DRM device 473 * @dev: DRM device
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev)
417 parse_lfp_panel_data(dev_priv, bdb); 519 parse_lfp_panel_data(dev_priv, bdb);
418 parse_sdvo_panel_data(dev_priv, bdb); 520 parse_sdvo_panel_data(dev_priv, bdb);
419 parse_sdvo_device_mapping(dev_priv, bdb); 521 parse_sdvo_device_mapping(dev_priv, bdb);
522 parse_device_mapping(dev_priv, bdb);
420 parse_driver_features(dev_priv, bdb); 523 parse_driver_features(dev_priv, bdb);
421 524
422 pci_unmap_rom(pdev, bios); 525 pci_unmap_rom(pdev, bios);
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 0f8e5f69ac7a..425ac9d7f724 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev);
549#define SWF14_APM_STANDBY 0x1 549#define SWF14_APM_STANDBY 0x1
550#define SWF14_APM_RESTORE 0x0 550#define SWF14_APM_RESTORE 0x0
551 551
552/* Device classes for LFP, TV, HDMI, DP and eDP outputs */
553#define DEVICE_TYPE_INT_LFP 0x1022
554#define DEVICE_TYPE_INT_TV 0x1009
555#define DEVICE_TYPE_HDMI 0x60D2
556#define DEVICE_TYPE_DP 0x68C6
557#define DEVICE_TYPE_eDP 0x78C6
558
559/* define the DVO port for HDMI output type */
560#define DVO_B 1
561#define DVO_C 2
562#define DVO_D 3
563
564/* define the PORT for DP output type */
565#define PORT_IDPB 7
566#define PORT_IDPC 8
567#define PORT_IDPD 9
568
552#endif /* _I830_BIOS_H_ */ 569#endif /* _I830_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e5051446c48e..9f3d3e563414 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp, reg; 40 u32 temp, reg;
41 41
42 if (IS_IGDNG(dev)) 42 if (IS_IRONLAKE(dev))
43 reg = PCH_ADPA; 43 reg = PCH_ADPA;
44 else 44 else
45 reg = ADPA; 45 reg = ADPA;
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
64 } 64 }
65 65
66 I915_WRITE(reg, temp); 66 I915_WRITE(reg, temp);
67
68 if (IS_IGD(dev)) {
69 if (mode == DRM_MODE_DPMS_OFF) {
70 /* turn off DAC */
71 temp = I915_READ(PORT_HOTPLUG_EN);
72 temp &= ~CRT_EOS_INT_EN;
73 I915_WRITE(PORT_HOTPLUG_EN, temp);
74
75 temp = I915_READ(PORT_HOTPLUG_STAT);
76 if (temp & CRT_EOS_INT_STATUS)
77 I915_WRITE(PORT_HOTPLUG_STAT,
78 CRT_EOS_INT_STATUS);
79 } else {
80 /* turn on DAC. EOS interrupt must be enabled after DAC
81 * is enabled, so it sounds not good to enable it in
82 * i915_driver_irq_postinstall()
83 * wait 12.5ms after DAC is enabled
84 */
85 msleep(13);
86 temp = I915_READ(PORT_HOTPLUG_STAT);
87 if (temp & CRT_EOS_INT_STATUS)
88 I915_WRITE(PORT_HOTPLUG_STAT,
89 CRT_EOS_INT_STATUS);
90 temp = I915_READ(PORT_HOTPLUG_EN);
91 temp |= CRT_EOS_INT_EN;
92 I915_WRITE(PORT_HOTPLUG_EN, temp);
93 }
94 }
95} 67}
96 68
97static int intel_crt_mode_valid(struct drm_connector *connector, 69static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
141 else 113 else
142 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
143 115
144 if (IS_IGDNG(dev)) 116 if (IS_IRONLAKE(dev))
145 adpa_reg = PCH_ADPA; 117 adpa_reg = PCH_ADPA;
146 else 118 else
147 adpa_reg = ADPA; 119 adpa_reg = ADPA;
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
150 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
151 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
152 */ 124 */
153 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 125 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
154 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
155 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
156 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
164 136
165 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
166 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
167 if (!IS_IGDNG(dev)) 139 if (!IS_IRONLAKE(dev))
168 I915_WRITE(BCLRPAT_A, 0); 140 I915_WRITE(BCLRPAT_A, 0);
169 } else { 141 } else {
170 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
171 if (!IS_IGDNG(dev)) 143 if (!IS_IRONLAKE(dev))
172 I915_WRITE(BCLRPAT_B, 0); 144 I915_WRITE(BCLRPAT_B, 0);
173 } 145 }
174 146
175 I915_WRITE(adpa_reg, adpa); 147 I915_WRITE(adpa_reg, adpa);
176} 148}
177 149
178static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector) 150static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
179{ 151{
180 struct drm_device *dev = connector->dev; 152 struct drm_device *dev = connector->dev;
181 struct drm_i915_private *dev_priv = dev->dev_private; 153 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
194 ADPA_CRT_HOTPLUG_ENABLE | 166 ADPA_CRT_HOTPLUG_ENABLE |
195 ADPA_CRT_HOTPLUG_FORCE_TRIGGER); 167 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
196 168
197 DRM_DEBUG("pch crt adpa 0x%x", adpa); 169 DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
198 I915_WRITE(PCH_ADPA, adpa); 170 I915_WRITE(PCH_ADPA, adpa);
199 171
200 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0) 172 while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
227 u32 hotplug_en; 199 u32 hotplug_en;
228 int i, tries = 0; 200 int i, tries = 0;
229 201
230 if (IS_IGDNG(dev)) 202 if (IS_IRONLAKE(dev))
231 return intel_igdng_crt_detect_hotplug(connector); 203 return intel_ironlake_crt_detect_hotplug(connector);
232 204
233 /* 205 /*
234 * On 4 series desktop, CRT detect sequence need to be done twice 206 * On 4 series desktop, CRT detect sequence need to be done twice
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev)
549 &intel_output->enc); 521 &intel_output->enc);
550 522
551 /* Set up the DDC bus. */ 523 /* Set up the DDC bus. */
552 if (IS_IGDNG(dev)) 524 if (IS_IRONLAKE(dev))
553 i2c_reg = PCH_GPIOA; 525 i2c_reg = PCH_GPIOA;
554 else { 526 else {
555 i2c_reg = GPIOA; 527 i2c_reg = GPIOA;
556 /* Use VBT information for CRT DDC if available */ 528 /* Use VBT information for CRT DDC if available */
557 if (dev_priv->crt_ddc_bus != -1) 529 if (dev_priv->crt_ddc_bus != 0)
558 i2c_reg = dev_priv->crt_ddc_bus; 530 i2c_reg = dev_priv->crt_ddc_bus;
559 } 531 }
560 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); 532 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 099f420de57a..52cd9b006da2 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -32,7 +32,7 @@
32#include "intel_drv.h" 32#include "intel_drv.h"
33#include "i915_drm.h" 33#include "i915_drm.h"
34#include "i915_drv.h" 34#include "i915_drv.h"
35#include "intel_dp.h" 35#include "drm_dp_helper.h"
36 36
37#include "drm_crtc_helper.h" 37#include "drm_crtc_helper.h"
38 38
@@ -102,32 +102,32 @@ struct intel_limit {
102#define I9XX_DOT_MAX 400000 102#define I9XX_DOT_MAX 400000
103#define I9XX_VCO_MIN 1400000 103#define I9XX_VCO_MIN 1400000
104#define I9XX_VCO_MAX 2800000 104#define I9XX_VCO_MAX 2800000
105#define IGD_VCO_MIN 1700000 105#define PINEVIEW_VCO_MIN 1700000
106#define IGD_VCO_MAX 3500000 106#define PINEVIEW_VCO_MAX 3500000
107#define I9XX_N_MIN 1 107#define I9XX_N_MIN 1
108#define I9XX_N_MAX 6 108#define I9XX_N_MAX 6
109/* IGD's Ncounter is a ring counter */ 109/* Pineview's Ncounter is a ring counter */
110#define IGD_N_MIN 3 110#define PINEVIEW_N_MIN 3
111#define IGD_N_MAX 6 111#define PINEVIEW_N_MAX 6
112#define I9XX_M_MIN 70 112#define I9XX_M_MIN 70
113#define I9XX_M_MAX 120 113#define I9XX_M_MAX 120
114#define IGD_M_MIN 2 114#define PINEVIEW_M_MIN 2
115#define IGD_M_MAX 256 115#define PINEVIEW_M_MAX 256
116#define I9XX_M1_MIN 10 116#define I9XX_M1_MIN 10
117#define I9XX_M1_MAX 22 117#define I9XX_M1_MAX 22
118#define I9XX_M2_MIN 5 118#define I9XX_M2_MIN 5
119#define I9XX_M2_MAX 9 119#define I9XX_M2_MAX 9
120/* IGD M1 is reserved, and must be 0 */ 120/* Pineview M1 is reserved, and must be 0 */
121#define IGD_M1_MIN 0 121#define PINEVIEW_M1_MIN 0
122#define IGD_M1_MAX 0 122#define PINEVIEW_M1_MAX 0
123#define IGD_M2_MIN 0 123#define PINEVIEW_M2_MIN 0
124#define IGD_M2_MAX 254 124#define PINEVIEW_M2_MAX 254
125#define I9XX_P_SDVO_DAC_MIN 5 125#define I9XX_P_SDVO_DAC_MIN 5
126#define I9XX_P_SDVO_DAC_MAX 80 126#define I9XX_P_SDVO_DAC_MAX 80
127#define I9XX_P_LVDS_MIN 7 127#define I9XX_P_LVDS_MIN 7
128#define I9XX_P_LVDS_MAX 98 128#define I9XX_P_LVDS_MAX 98
129#define IGD_P_LVDS_MIN 7 129#define PINEVIEW_P_LVDS_MIN 7
130#define IGD_P_LVDS_MAX 112 130#define PINEVIEW_P_LVDS_MAX 112
131#define I9XX_P1_MIN 1 131#define I9XX_P1_MIN 1
132#define I9XX_P1_MAX 8 132#define I9XX_P1_MAX 8
133#define I9XX_P2_SDVO_DAC_SLOW 10 133#define I9XX_P2_SDVO_DAC_SLOW 10
@@ -234,33 +234,33 @@ struct intel_limit {
234#define G4X_P2_DISPLAY_PORT_FAST 10 234#define G4X_P2_DISPLAY_PORT_FAST 10
235#define G4X_P2_DISPLAY_PORT_LIMIT 0 235#define G4X_P2_DISPLAY_PORT_LIMIT 0
236 236
237/* IGDNG */ 237/* Ironlake */
238/* as we calculate clock using (register_value + 2) for 238/* as we calculate clock using (register_value + 2) for
239 N/M1/M2, so here the range value for them is (actual_value-2). 239 N/M1/M2, so here the range value for them is (actual_value-2).
240 */ 240 */
241#define IGDNG_DOT_MIN 25000 241#define IRONLAKE_DOT_MIN 25000
242#define IGDNG_DOT_MAX 350000 242#define IRONLAKE_DOT_MAX 350000
243#define IGDNG_VCO_MIN 1760000 243#define IRONLAKE_VCO_MIN 1760000
244#define IGDNG_VCO_MAX 3510000 244#define IRONLAKE_VCO_MAX 3510000
245#define IGDNG_N_MIN 1 245#define IRONLAKE_N_MIN 1
246#define IGDNG_N_MAX 5 246#define IRONLAKE_N_MAX 5
247#define IGDNG_M_MIN 79 247#define IRONLAKE_M_MIN 79
248#define IGDNG_M_MAX 118 248#define IRONLAKE_M_MAX 118
249#define IGDNG_M1_MIN 12 249#define IRONLAKE_M1_MIN 12
250#define IGDNG_M1_MAX 23 250#define IRONLAKE_M1_MAX 23
251#define IGDNG_M2_MIN 5 251#define IRONLAKE_M2_MIN 5
252#define IGDNG_M2_MAX 9 252#define IRONLAKE_M2_MAX 9
253#define IGDNG_P_SDVO_DAC_MIN 5 253#define IRONLAKE_P_SDVO_DAC_MIN 5
254#define IGDNG_P_SDVO_DAC_MAX 80 254#define IRONLAKE_P_SDVO_DAC_MAX 80
255#define IGDNG_P_LVDS_MIN 28 255#define IRONLAKE_P_LVDS_MIN 28
256#define IGDNG_P_LVDS_MAX 112 256#define IRONLAKE_P_LVDS_MAX 112
257#define IGDNG_P1_MIN 1 257#define IRONLAKE_P1_MIN 1
258#define IGDNG_P1_MAX 8 258#define IRONLAKE_P1_MAX 8
259#define IGDNG_P2_SDVO_DAC_SLOW 10 259#define IRONLAKE_P2_SDVO_DAC_SLOW 10
260#define IGDNG_P2_SDVO_DAC_FAST 5 260#define IRONLAKE_P2_SDVO_DAC_FAST 5
261#define IGDNG_P2_LVDS_SLOW 14 /* single channel */ 261#define IRONLAKE_P2_LVDS_SLOW 14 /* single channel */
262#define IGDNG_P2_LVDS_FAST 7 /* double channel */ 262#define IRONLAKE_P2_LVDS_FAST 7 /* double channel */
263#define IGDNG_P2_DOT_LIMIT 225000 /* 225Mhz */ 263#define IRONLAKE_P2_DOT_LIMIT 225000 /* 225 MHz */
264 264
265static bool 265static bool
266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 266intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
@@ -272,15 +272,15 @@ static bool
272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 272intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
273 int target, int refclk, intel_clock_t *best_clock); 273 int target, int refclk, intel_clock_t *best_clock);
274static bool 274static bool
275intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 275intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
276 int target, int refclk, intel_clock_t *best_clock); 276 int target, int refclk, intel_clock_t *best_clock);
277 277
278static bool 278static bool
279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, 279intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
280 int target, int refclk, intel_clock_t *best_clock); 280 int target, int refclk, intel_clock_t *best_clock);
281static bool 281static bool
282intel_find_pll_igdng_dp(const intel_limit_t *, struct drm_crtc *crtc, 282intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
283 int target, int refclk, intel_clock_t *best_clock); 283 int target, int refclk, intel_clock_t *best_clock);
284 284
285static const intel_limit_t intel_limits_i8xx_dvo = { 285static const intel_limit_t intel_limits_i8xx_dvo = {
286 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX }, 286 .dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@@ -453,13 +453,13 @@ static const intel_limit_t intel_limits_g4x_display_port = {
453 .find_pll = intel_find_pll_g4x_dp, 453 .find_pll = intel_find_pll_g4x_dp,
454}; 454};
455 455
456static const intel_limit_t intel_limits_igd_sdvo = { 456static const intel_limit_t intel_limits_pineview_sdvo = {
457 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX}, 457 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
458 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 458 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
459 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 459 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
460 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, 460 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
461 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, 461 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
462 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, 462 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
463 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX }, 463 .p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
464 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 464 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT, 465 .p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
@@ -468,59 +468,59 @@ static const intel_limit_t intel_limits_igd_sdvo = {
468 .find_reduced_pll = intel_find_best_reduced_PLL, 468 .find_reduced_pll = intel_find_best_reduced_PLL,
469}; 469};
470 470
471static const intel_limit_t intel_limits_igd_lvds = { 471static const intel_limit_t intel_limits_pineview_lvds = {
472 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX }, 472 .dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
473 .vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX }, 473 .vco = { .min = PINEVIEW_VCO_MIN, .max = PINEVIEW_VCO_MAX },
474 .n = { .min = IGD_N_MIN, .max = IGD_N_MAX }, 474 .n = { .min = PINEVIEW_N_MIN, .max = PINEVIEW_N_MAX },
475 .m = { .min = IGD_M_MIN, .max = IGD_M_MAX }, 475 .m = { .min = PINEVIEW_M_MIN, .max = PINEVIEW_M_MAX },
476 .m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX }, 476 .m1 = { .min = PINEVIEW_M1_MIN, .max = PINEVIEW_M1_MAX },
477 .m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX }, 477 .m2 = { .min = PINEVIEW_M2_MIN, .max = PINEVIEW_M2_MAX },
478 .p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX }, 478 .p = { .min = PINEVIEW_P_LVDS_MIN, .max = PINEVIEW_P_LVDS_MAX },
479 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX }, 479 .p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
480 /* IGD only supports single-channel mode. */ 480 /* Pineview only supports single-channel mode. */
481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT, 481 .p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 482 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
483 .find_pll = intel_find_best_PLL, 483 .find_pll = intel_find_best_PLL,
484 .find_reduced_pll = intel_find_best_reduced_PLL, 484 .find_reduced_pll = intel_find_best_reduced_PLL,
485}; 485};
486 486
487static const intel_limit_t intel_limits_igdng_sdvo = { 487static const intel_limit_t intel_limits_ironlake_sdvo = {
488 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 488 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
489 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 489 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
490 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 490 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
491 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, 491 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
492 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, 492 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
493 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, 493 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
494 .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX }, 494 .p = { .min = IRONLAKE_P_SDVO_DAC_MIN, .max = IRONLAKE_P_SDVO_DAC_MAX },
495 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, 495 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
496 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, 496 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
497 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW, 497 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
498 .p2_fast = IGDNG_P2_SDVO_DAC_FAST }, 498 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
499 .find_pll = intel_igdng_find_best_PLL, 499 .find_pll = intel_ironlake_find_best_PLL,
500}; 500};
501 501
502static const intel_limit_t intel_limits_igdng_lvds = { 502static const intel_limit_t intel_limits_ironlake_lvds = {
503 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX }, 503 .dot = { .min = IRONLAKE_DOT_MIN, .max = IRONLAKE_DOT_MAX },
504 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX }, 504 .vco = { .min = IRONLAKE_VCO_MIN, .max = IRONLAKE_VCO_MAX },
505 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX }, 505 .n = { .min = IRONLAKE_N_MIN, .max = IRONLAKE_N_MAX },
506 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX }, 506 .m = { .min = IRONLAKE_M_MIN, .max = IRONLAKE_M_MAX },
507 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX }, 507 .m1 = { .min = IRONLAKE_M1_MIN, .max = IRONLAKE_M1_MAX },
508 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX }, 508 .m2 = { .min = IRONLAKE_M2_MIN, .max = IRONLAKE_M2_MAX },
509 .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX }, 509 .p = { .min = IRONLAKE_P_LVDS_MIN, .max = IRONLAKE_P_LVDS_MAX },
510 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX }, 510 .p1 = { .min = IRONLAKE_P1_MIN, .max = IRONLAKE_P1_MAX },
511 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT, 511 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
512 .p2_slow = IGDNG_P2_LVDS_SLOW, 512 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
513 .p2_fast = IGDNG_P2_LVDS_FAST }, 513 .p2_fast = IRONLAKE_P2_LVDS_FAST },
514 .find_pll = intel_igdng_find_best_PLL, 514 .find_pll = intel_ironlake_find_best_PLL,
515}; 515};
516 516
517static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc) 517static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
518{ 518{
519 const intel_limit_t *limit; 519 const intel_limit_t *limit;
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
521 limit = &intel_limits_igdng_lvds; 521 limit = &intel_limits_ironlake_lvds;
522 else 522 else
523 limit = &intel_limits_igdng_sdvo; 523 limit = &intel_limits_ironlake_sdvo;
524 524
525 return limit; 525 return limit;
526} 526}
@@ -557,20 +557,20 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
557 struct drm_device *dev = crtc->dev; 557 struct drm_device *dev = crtc->dev;
558 const intel_limit_t *limit; 558 const intel_limit_t *limit;
559 559
560 if (IS_IGDNG(dev)) 560 if (IS_IRONLAKE(dev))
561 limit = intel_igdng_limit(crtc); 561 limit = intel_ironlake_limit(crtc);
562 else if (IS_G4X(dev)) { 562 else if (IS_G4X(dev)) {
563 limit = intel_g4x_limit(crtc); 563 limit = intel_g4x_limit(crtc);
564 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 564 } else if (IS_I9XX(dev) && !IS_PINEVIEW(dev)) {
565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 565 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
566 limit = &intel_limits_i9xx_lvds; 566 limit = &intel_limits_i9xx_lvds;
567 else 567 else
568 limit = &intel_limits_i9xx_sdvo; 568 limit = &intel_limits_i9xx_sdvo;
569 } else if (IS_IGD(dev)) { 569 } else if (IS_PINEVIEW(dev)) {
570 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 570 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
571 limit = &intel_limits_igd_lvds; 571 limit = &intel_limits_pineview_lvds;
572 else 572 else
573 limit = &intel_limits_igd_sdvo; 573 limit = &intel_limits_pineview_sdvo;
574 } else { 574 } else {
575 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 575 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
576 limit = &intel_limits_i8xx_lvds; 576 limit = &intel_limits_i8xx_lvds;
@@ -580,8 +580,8 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
580 return limit; 580 return limit;
581} 581}
582 582
583/* m1 is reserved as 0 in IGD, n is a ring counter */ 583/* m1 is reserved as 0 in Pineview, n is a ring counter */
584static void igd_clock(int refclk, intel_clock_t *clock) 584static void pineview_clock(int refclk, intel_clock_t *clock)
585{ 585{
586 clock->m = clock->m2 + 2; 586 clock->m = clock->m2 + 2;
587 clock->p = clock->p1 * clock->p2; 587 clock->p = clock->p1 * clock->p2;
@@ -591,8 +591,8 @@ static void igd_clock(int refclk, intel_clock_t *clock)
591 591
592static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) 592static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
593{ 593{
594 if (IS_IGD(dev)) { 594 if (IS_PINEVIEW(dev)) {
595 igd_clock(refclk, clock); 595 pineview_clock(refclk, clock);
596 return; 596 return;
597 } 597 }
598 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 598 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
@@ -657,7 +657,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
657 INTELPllInvalid ("m2 out of range\n"); 657 INTELPllInvalid ("m2 out of range\n");
658 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 658 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
659 INTELPllInvalid ("m1 out of range\n"); 659 INTELPllInvalid ("m1 out of range\n");
660 if (clock->m1 <= clock->m2 && !IS_IGD(dev)) 660 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
661 INTELPllInvalid ("m1 <= m2\n"); 661 INTELPllInvalid ("m1 <= m2\n");
662 if (clock->m < limit->m.min || limit->m.max < clock->m) 662 if (clock->m < limit->m.min || limit->m.max < clock->m)
663 INTELPllInvalid ("m out of range\n"); 663 INTELPllInvalid ("m out of range\n");
@@ -706,16 +706,17 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
706 706
707 memset (best_clock, 0, sizeof (*best_clock)); 707 memset (best_clock, 0, sizeof (*best_clock));
708 708
709 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 709 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
710 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 710 clock.m1++) {
711 clock.m1++) { 711 for (clock.m2 = limit->m2.min;
712 for (clock.m2 = limit->m2.min; 712 clock.m2 <= limit->m2.max; clock.m2++) {
713 clock.m2 <= limit->m2.max; clock.m2++) { 713 /* m1 is always 0 in Pineview */
714 /* m1 is always 0 in IGD */ 714 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
715 if (clock.m2 >= clock.m1 && !IS_IGD(dev)) 715 break;
716 break; 716 for (clock.n = limit->n.min;
717 for (clock.n = limit->n.min; 717 clock.n <= limit->n.max; clock.n++) {
718 clock.n <= limit->n.max; clock.n++) { 718 for (clock.p1 = limit->p1.min;
719 clock.p1 <= limit->p1.max; clock.p1++) {
719 int this_err; 720 int this_err;
720 721
721 intel_clock(dev, refclk, &clock); 722 intel_clock(dev, refclk, &clock);
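[Editor's sketch: the interleaved rendering above makes the reordering hard to follow. The patch moves the p1 loop from the outermost position to the innermost; the resulting nesting, reconstructed from the new side of the hunk with the loop body elided, is:]

	/* New search order: m1 -> m2 -> n -> p1 (p1 was outermost before) */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
		for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview, so the cutoff only applies elsewhere */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					intel_clock(dev, refclk, &clock);
					/* ... validity check and best-error update ... */
				}
			}
		}
	}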
@@ -751,8 +752,8 @@ intel_find_best_reduced_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
751 752
752 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 753 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
753 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) { 754 for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
754 /* m1 is always 0 in IGD */ 755 /* m1 is always 0 in Pineview */
755 if (clock.m2 >= clock.m1 && !IS_IGD(dev)) 756 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
756 break; 757 break;
757 for (clock.n = limit->n.min; clock.n <= limit->n.max; 758 for (clock.n = limit->n.min; clock.n <= limit->n.max;
758 clock.n++) { 759 clock.n++) {
@@ -833,8 +834,8 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
833} 834}
834 835
835static bool 836static bool
836intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc, 837intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
837 int target, int refclk, intel_clock_t *best_clock) 838 int target, int refclk, intel_clock_t *best_clock)
838{ 839{
839 struct drm_device *dev = crtc->dev; 840 struct drm_device *dev = crtc->dev;
840 intel_clock_t clock; 841 intel_clock_t clock;
@@ -857,8 +858,8 @@ intel_find_pll_igdng_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
857} 858}
858 859
859static bool 860static bool
860intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 861intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
861 int target, int refclk, intel_clock_t *best_clock) 862 int target, int refclk, intel_clock_t *best_clock)
862{ 863{
863 struct drm_device *dev = crtc->dev; 864 struct drm_device *dev = crtc->dev;
864 struct drm_i915_private *dev_priv = dev->dev_private; 865 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -871,7 +872,7 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
871 return true; 872 return true;
872 873
873 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 874 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
874 return intel_find_pll_igdng_dp(limit, crtc, target, 875 return intel_find_pll_ironlake_dp(limit, crtc, target,
875 refclk, best_clock); 876 refclk, best_clock);
876 877
877 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 878 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
@@ -949,7 +950,7 @@ void
949intel_wait_for_vblank(struct drm_device *dev) 950intel_wait_for_vblank(struct drm_device *dev)
950{ 951{
951 /* Wait for 20ms, i.e. one cycle at 50Hz. */ 952
952 mdelay(20); 953 msleep(20);
953} 954}
954 955
955/* Parameters have changed, update FBC info */ 956/* Parameters have changed, update FBC info */
@@ -994,7 +995,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
994 fbc_ctl |= dev_priv->cfb_fence; 995 fbc_ctl |= dev_priv->cfb_fence;
995 I915_WRITE(FBC_CONTROL, fbc_ctl); 996 I915_WRITE(FBC_CONTROL, fbc_ctl);
996 997
997 DRM_DEBUG("enabled FBC, pitch %ld, yoff %d, plane %d, ", 998 DRM_DEBUG_KMS("enabled FBC, pitch %ld, yoff %d, plane %d, ",
998 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane); 999 dev_priv->cfb_pitch, crtc->y, dev_priv->cfb_plane);
999} 1000}
1000 1001
@@ -1017,7 +1018,7 @@ void i8xx_disable_fbc(struct drm_device *dev)
1017 1018
1018 intel_wait_for_vblank(dev); 1019 intel_wait_for_vblank(dev);
1019 1020
1020 DRM_DEBUG("disabled FBC\n"); 1021 DRM_DEBUG_KMS("disabled FBC\n");
1021} 1022}
1022 1023
1023static bool i8xx_fbc_enabled(struct drm_crtc *crtc) 1024static bool i8xx_fbc_enabled(struct drm_crtc *crtc)
@@ -1062,7 +1063,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1062 /* enable it... */ 1063 /* enable it... */
1063 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); 1064 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1064 1065
1065 DRM_DEBUG("enabled fbc on plane %d\n", intel_crtc->plane); 1066 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1066} 1067}
1067 1068
1068void g4x_disable_fbc(struct drm_device *dev) 1069void g4x_disable_fbc(struct drm_device *dev)
@@ -1076,7 +1077,7 @@ void g4x_disable_fbc(struct drm_device *dev)
1076 I915_WRITE(DPFC_CONTROL, dpfc_ctl); 1077 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1077 intel_wait_for_vblank(dev); 1078 intel_wait_for_vblank(dev);
1078 1079
1079 DRM_DEBUG("disabled FBC\n"); 1080 DRM_DEBUG_KMS("disabled FBC\n");
1080} 1081}
1081 1082
1082static bool g4x_fbc_enabled(struct drm_crtc *crtc) 1083static bool g4x_fbc_enabled(struct drm_crtc *crtc)
@@ -1141,25 +1142,27 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1141 * - going to an unsupported config (interlace, pixel multiply, etc.) 1142 * - going to an unsupported config (interlace, pixel multiply, etc.)
1142 */ 1143 */
1143 if (intel_fb->obj->size > dev_priv->cfb_size) { 1144 if (intel_fb->obj->size > dev_priv->cfb_size) {
1144 DRM_DEBUG("framebuffer too large, disabling compression\n"); 1145 DRM_DEBUG_KMS("framebuffer too large, disabling "
1146 "compression\n");
1145 goto out_disable; 1147 goto out_disable;
1146 } 1148 }
1147 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) || 1149 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
1148 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) { 1150 (mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
1149 DRM_DEBUG("mode incompatible with compression, disabling\n"); 1151 DRM_DEBUG_KMS("mode incompatible with compression, "
1152 "disabling\n");
1150 goto out_disable; 1153 goto out_disable;
1151 } 1154 }
1152 if ((mode->hdisplay > 2048) || 1155 if ((mode->hdisplay > 2048) ||
1153 (mode->vdisplay > 1536)) { 1156 (mode->vdisplay > 1536)) {
1154 DRM_DEBUG("mode too large for compression, disabling\n"); 1157 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
1155 goto out_disable; 1158 goto out_disable;
1156 } 1159 }
1157 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) { 1160 if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
1158 DRM_DEBUG("plane not 0, disabling compression\n"); 1161 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
1159 goto out_disable; 1162 goto out_disable;
1160 } 1163 }
1161 if (obj_priv->tiling_mode != I915_TILING_X) { 1164 if (obj_priv->tiling_mode != I915_TILING_X) {
1162 DRM_DEBUG("framebuffer not tiled, disabling compression\n"); 1165 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1163 goto out_disable; 1166 goto out_disable;
1164 } 1167 }
1165 1168
@@ -1181,13 +1184,57 @@ static void intel_update_fbc(struct drm_crtc *crtc,
1181 return; 1184 return;
1182 1185
1183out_disable: 1186out_disable:
1184 DRM_DEBUG("unsupported config, disabling FBC\n"); 1187 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
1185 /* Multiple disables should be harmless */ 1188 /* Multiple disables should be harmless */
1186 if (dev_priv->display.fbc_enabled(crtc)) 1189 if (dev_priv->display.fbc_enabled(crtc))
1187 dev_priv->display.disable_fbc(dev); 1190 dev_priv->display.disable_fbc(dev);
1188} 1191}
1189 1192
1190static int 1193static int
1194intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
1195{
1196 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1197 u32 alignment;
1198 int ret;
1199
1200 switch (obj_priv->tiling_mode) {
1201 case I915_TILING_NONE:
1202 alignment = 64 * 1024;
1203 break;
1204 case I915_TILING_X:
1205 /* pin() will align the object as required by fence */
1206 alignment = 0;
1207 break;
1208 case I915_TILING_Y:
1209 /* FIXME: Is this true? */
1210 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1211 return -EINVAL;
1212 default:
1213 BUG();
1214 }
1215
1216 ret = i915_gem_object_pin(obj, alignment);
1217 if (ret != 0)
1218 return ret;
1219
1220 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1221 * fence, whereas 965+ only requires a fence if using
1222 * framebuffer compression. For simplicity, we always install
1223 * a fence as the cost is not that onerous.
1224 */
1225 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1226 obj_priv->tiling_mode != I915_TILING_NONE) {
1227 ret = i915_gem_object_get_fence_reg(obj);
1228 if (ret != 0) {
1229 i915_gem_object_unpin(obj);
1230 return ret;
1231 }
1232 }
1233
1234 return 0;
1235}
1236
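[Editor's sketch: the factored-out helper keeps the pin-plus-fence policy in one place. A minimal usage sketch, mirroring the rewritten call site shown below; it assumes the caller takes dev->struct_mutex around the call, as intel_pipe_set_base does.]

	/* Sketch: pin and fence the framebuffer object before programming the plane */
	mutex_lock(&dev->struct_mutex);
	ret = intel_pin_and_fence_fb_obj(dev, obj);
	if (ret != 0) {
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}
	/* ... program DSPCNTR/DSPSTRIDE/base registers, then drop the mutex ... */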
1237static int
1191intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 1238intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1192 struct drm_framebuffer *old_fb) 1239 struct drm_framebuffer *old_fb)
1193{ 1240{
@@ -1206,12 +1253,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1206 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE; 1253 int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
1207 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF); 1254 int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
1208 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR; 1255 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
1209 u32 dspcntr, alignment; 1256 u32 dspcntr;
1210 int ret; 1257 int ret;
1211 1258
1212 /* no fb bound */ 1259 /* no fb bound */
1213 if (!crtc->fb) { 1260 if (!crtc->fb) {
1214 DRM_DEBUG("No FB bound\n"); 1261 DRM_DEBUG_KMS("No FB bound\n");
1215 return 0; 1262 return 0;
1216 } 1263 }
1217 1264
@@ -1228,24 +1275,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1228 obj = intel_fb->obj; 1275 obj = intel_fb->obj;
1229 obj_priv = obj->driver_private; 1276 obj_priv = obj->driver_private;
1230 1277
1231 switch (obj_priv->tiling_mode) {
1232 case I915_TILING_NONE:
1233 alignment = 64 * 1024;
1234 break;
1235 case I915_TILING_X:
1236 /* pin() will align the object as required by fence */
1237 alignment = 0;
1238 break;
1239 case I915_TILING_Y:
1240 /* FIXME: Is this true? */
1241 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1242 return -EINVAL;
1243 default:
1244 BUG();
1245 }
1246
1247 mutex_lock(&dev->struct_mutex); 1278 mutex_lock(&dev->struct_mutex);
1248 ret = i915_gem_object_pin(obj, alignment); 1279 ret = intel_pin_and_fence_fb_obj(dev, obj);
1249 if (ret != 0) { 1280 if (ret != 0) {
1250 mutex_unlock(&dev->struct_mutex); 1281 mutex_unlock(&dev->struct_mutex);
1251 return ret; 1282 return ret;
@@ -1258,20 +1289,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1258 return ret; 1289 return ret;
1259 } 1290 }
1260 1291
1261 /* Install a fence for tiled scan-out. Pre-i965 always needs a fence,
1262 * whereas 965+ only requires a fence if using framebuffer compression.
1263 * For simplicity, we always install a fence as the cost is not that onerous.
1264 */
1265 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
1266 obj_priv->tiling_mode != I915_TILING_NONE) {
1267 ret = i915_gem_object_get_fence_reg(obj);
1268 if (ret != 0) {
1269 i915_gem_object_unpin(obj);
1270 mutex_unlock(&dev->struct_mutex);
1271 return ret;
1272 }
1273 }
1274
1275 dspcntr = I915_READ(dspcntr_reg); 1292 dspcntr = I915_READ(dspcntr_reg);
1276 /* Mask out pixel format bits in case we change it */ 1293 /* Mask out pixel format bits in case we change it */
1277 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; 1294 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -1287,7 +1304,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1287 break; 1304 break;
1288 case 24: 1305 case 24:
1289 case 32: 1306 case 32:
1290 dspcntr |= DISPPLANE_32BPP_NO_ALPHA; 1307 if (crtc->fb->depth == 30)
1308 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1309 else
1310 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1291 break; 1311 break;
1292 default: 1312 default:
1293 DRM_ERROR("Unknown color depth\n"); 1313 DRM_ERROR("Unknown color depth\n");
@@ -1302,7 +1322,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1302 dspcntr &= ~DISPPLANE_TILED; 1322 dspcntr &= ~DISPPLANE_TILED;
1303 } 1323 }
1304 1324
1305 if (IS_IGDNG(dev)) 1325 if (IS_IRONLAKE(dev))
1306 /* must disable */ 1326 /* must disable */
1307 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 1327 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1308 1328
@@ -1311,7 +1331,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1311 Start = obj_priv->gtt_offset; 1331 Start = obj_priv->gtt_offset;
1312 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8); 1332 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
1313 1333
1314 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 1334 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
1315 I915_WRITE(dspstride, crtc->fb->pitch); 1335 I915_WRITE(dspstride, crtc->fb->pitch);
1316 if (IS_I965G(dev)) { 1336 if (IS_I965G(dev)) {
1317 I915_WRITE(dspbase, Offset); 1337 I915_WRITE(dspbase, Offset);
@@ -1363,7 +1383,7 @@ static void i915_disable_vga (struct drm_device *dev)
1363 u8 sr1; 1383 u8 sr1;
1364 u32 vga_reg; 1384 u32 vga_reg;
1365 1385
1366 if (IS_IGDNG(dev)) 1386 if (IS_IRONLAKE(dev))
1367 vga_reg = CPU_VGACNTRL; 1387 vga_reg = CPU_VGACNTRL;
1368 else 1388 else
1369 vga_reg = VGACNTRL; 1389 vga_reg = VGACNTRL;
@@ -1379,19 +1399,19 @@ static void i915_disable_vga (struct drm_device *dev)
1379 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 1399 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
1380} 1400}
1381 1401
1382static void igdng_disable_pll_edp (struct drm_crtc *crtc) 1402static void ironlake_disable_pll_edp (struct drm_crtc *crtc)
1383{ 1403{
1384 struct drm_device *dev = crtc->dev; 1404 struct drm_device *dev = crtc->dev;
1385 struct drm_i915_private *dev_priv = dev->dev_private; 1405 struct drm_i915_private *dev_priv = dev->dev_private;
1386 u32 dpa_ctl; 1406 u32 dpa_ctl;
1387 1407
1388 DRM_DEBUG("\n"); 1408 DRM_DEBUG_KMS("\n");
1389 dpa_ctl = I915_READ(DP_A); 1409 dpa_ctl = I915_READ(DP_A);
1390 dpa_ctl &= ~DP_PLL_ENABLE; 1410 dpa_ctl &= ~DP_PLL_ENABLE;
1391 I915_WRITE(DP_A, dpa_ctl); 1411 I915_WRITE(DP_A, dpa_ctl);
1392} 1412}
1393 1413
1394static void igdng_enable_pll_edp (struct drm_crtc *crtc) 1414static void ironlake_enable_pll_edp (struct drm_crtc *crtc)
1395{ 1415{
1396 struct drm_device *dev = crtc->dev; 1416 struct drm_device *dev = crtc->dev;
1397 struct drm_i915_private *dev_priv = dev->dev_private; 1417 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1404,13 +1424,13 @@ static void igdng_enable_pll_edp (struct drm_crtc *crtc)
1404} 1424}
1405 1425
1406 1426
1407static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock) 1427static void ironlake_set_pll_edp (struct drm_crtc *crtc, int clock)
1408{ 1428{
1409 struct drm_device *dev = crtc->dev; 1429 struct drm_device *dev = crtc->dev;
1410 struct drm_i915_private *dev_priv = dev->dev_private; 1430 struct drm_i915_private *dev_priv = dev->dev_private;
1411 u32 dpa_ctl; 1431 u32 dpa_ctl;
1412 1432
1413 DRM_DEBUG("eDP PLL enable for clock %d\n", clock); 1433 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
1414 dpa_ctl = I915_READ(DP_A); 1434 dpa_ctl = I915_READ(DP_A);
1415 dpa_ctl &= ~DP_PLL_FREQ_MASK; 1435 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1416 1436
@@ -1440,7 +1460,7 @@ static void igdng_set_pll_edp (struct drm_crtc *crtc, int clock)
1440 udelay(500); 1460 udelay(500);
1441} 1461}
1442 1462
1443static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode) 1463static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
1444{ 1464{
1445 struct drm_device *dev = crtc->dev; 1465 struct drm_device *dev = crtc->dev;
1446 struct drm_i915_private *dev_priv = dev->dev_private; 1466 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1481,10 +1501,19 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1481 case DRM_MODE_DPMS_ON: 1501 case DRM_MODE_DPMS_ON:
1482 case DRM_MODE_DPMS_STANDBY: 1502 case DRM_MODE_DPMS_STANDBY:
1483 case DRM_MODE_DPMS_SUSPEND: 1503 case DRM_MODE_DPMS_SUSPEND:
1484 DRM_DEBUG("crtc %d dpms on\n", pipe); 1504 DRM_DEBUG_KMS("crtc %d dpms on\n", pipe);
1505
1506 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1507 temp = I915_READ(PCH_LVDS);
1508 if ((temp & LVDS_PORT_EN) == 0) {
1509 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
1510 POSTING_READ(PCH_LVDS);
1511 }
1512 }
1513
1485 if (HAS_eDP) { 1514 if (HAS_eDP) {
1486 /* enable eDP PLL */ 1515 /* enable eDP PLL */
1487 igdng_enable_pll_edp(crtc); 1516 ironlake_enable_pll_edp(crtc);
1488 } else { 1517 } else {
1489 /* enable PCH DPLL */ 1518 /* enable PCH DPLL */
1490 temp = I915_READ(pch_dpll_reg); 1519 temp = I915_READ(pch_dpll_reg);
@@ -1501,7 +1530,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1501 I915_READ(fdi_rx_reg); 1530 I915_READ(fdi_rx_reg);
1502 udelay(200); 1531 udelay(200);
1503 1532
1504 /* Enable CPU FDI TX PLL, always on for IGDNG */ 1533 /* Enable CPU FDI TX PLL, always on for Ironlake */
1505 temp = I915_READ(fdi_tx_reg); 1534 temp = I915_READ(fdi_tx_reg);
1506 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 1535 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
1507 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE); 1536 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
@@ -1568,12 +1597,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1568 udelay(150); 1597 udelay(150);
1569 1598
1570 temp = I915_READ(fdi_rx_iir_reg); 1599 temp = I915_READ(fdi_rx_iir_reg);
1571 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1600 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1572 1601
1573 if ((temp & FDI_RX_BIT_LOCK) == 0) { 1602 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1574 for (j = 0; j < tries; j++) { 1603 for (j = 0; j < tries; j++) {
1575 temp = I915_READ(fdi_rx_iir_reg); 1604 temp = I915_READ(fdi_rx_iir_reg);
1576 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1605 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1606 temp);
1577 if (temp & FDI_RX_BIT_LOCK) 1607 if (temp & FDI_RX_BIT_LOCK)
1578 break; 1608 break;
1579 udelay(200); 1609 udelay(200);
@@ -1582,11 +1612,11 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1582 I915_WRITE(fdi_rx_iir_reg, 1612 I915_WRITE(fdi_rx_iir_reg,
1583 temp | FDI_RX_BIT_LOCK); 1613 temp | FDI_RX_BIT_LOCK);
1584 else 1614 else
1585 DRM_DEBUG("train 1 fail\n"); 1615 DRM_DEBUG_KMS("train 1 fail\n");
1586 } else { 1616 } else {
1587 I915_WRITE(fdi_rx_iir_reg, 1617 I915_WRITE(fdi_rx_iir_reg,
1588 temp | FDI_RX_BIT_LOCK); 1618 temp | FDI_RX_BIT_LOCK);
1589 DRM_DEBUG("train 1 ok 2!\n"); 1619 DRM_DEBUG_KMS("train 1 ok 2!\n");
1590 } 1620 }
1591 temp = I915_READ(fdi_tx_reg); 1621 temp = I915_READ(fdi_tx_reg);
1592 temp &= ~FDI_LINK_TRAIN_NONE; 1622 temp &= ~FDI_LINK_TRAIN_NONE;
@@ -1601,12 +1631,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1601 udelay(150); 1631 udelay(150);
1602 1632
1603 temp = I915_READ(fdi_rx_iir_reg); 1633 temp = I915_READ(fdi_rx_iir_reg);
1604 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1634 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
1605 1635
1606 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) { 1636 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1607 for (j = 0; j < tries; j++) { 1637 for (j = 0; j < tries; j++) {
1608 temp = I915_READ(fdi_rx_iir_reg); 1638 temp = I915_READ(fdi_rx_iir_reg);
1609 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp); 1639 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n",
1640 temp);
1610 if (temp & FDI_RX_SYMBOL_LOCK) 1641 if (temp & FDI_RX_SYMBOL_LOCK)
1611 break; 1642 break;
1612 udelay(200); 1643 udelay(200);
@@ -1614,15 +1645,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1614 if (j != tries) { 1645 if (j != tries) {
1615 I915_WRITE(fdi_rx_iir_reg, 1646 I915_WRITE(fdi_rx_iir_reg,
1616 temp | FDI_RX_SYMBOL_LOCK); 1647 temp | FDI_RX_SYMBOL_LOCK);
1617 DRM_DEBUG("train 2 ok 1!\n"); 1648 DRM_DEBUG_KMS("train 2 ok 1!\n");
1618 } else 1649 } else
1619 DRM_DEBUG("train 2 fail\n"); 1650 DRM_DEBUG_KMS("train 2 fail\n");
1620 } else { 1651 } else {
1621 I915_WRITE(fdi_rx_iir_reg, 1652 I915_WRITE(fdi_rx_iir_reg,
1622 temp | FDI_RX_SYMBOL_LOCK); 1653 temp | FDI_RX_SYMBOL_LOCK);
1623 DRM_DEBUG("train 2 ok 2!\n"); 1654 DRM_DEBUG_KMS("train 2 ok 2!\n");
1624 } 1655 }
1625 DRM_DEBUG("train done\n"); 1656 DRM_DEBUG_KMS("train done\n");
1626 1657
1627 /* set transcoder timing */ 1658 /* set transcoder timing */
1628 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg)); 1659 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
@@ -1664,9 +1695,7 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1664 1695
1665 break; 1696 break;
1666 case DRM_MODE_DPMS_OFF: 1697 case DRM_MODE_DPMS_OFF:
1667 DRM_DEBUG("crtc %d dpms off\n", pipe); 1698 DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
1668
1669 i915_disable_vga(dev);
1670 1699
1671 /* Disable display plane */ 1700 /* Disable display plane */
1672 temp = I915_READ(dspcntr_reg); 1701 temp = I915_READ(dspcntr_reg);
@@ -1677,6 +1706,8 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1677 I915_READ(dspbase_reg); 1706 I915_READ(dspbase_reg);
1678 } 1707 }
1679 1708
1709 i915_disable_vga(dev);
1710
1680 /* disable cpu pipe, disable after all planes disabled */ 1711 /* disable cpu pipe, disable after all planes disabled */
1681 temp = I915_READ(pipeconf_reg); 1712 temp = I915_READ(pipeconf_reg);
1682 if ((temp & PIPEACONF_ENABLE) != 0) { 1713 if ((temp & PIPEACONF_ENABLE) != 0) {
@@ -1690,16 +1721,23 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1690 udelay(500); 1721 udelay(500);
1691 continue; 1722 continue;
1692 } else { 1723 } else {
1693 DRM_DEBUG("pipe %d off delay\n", pipe); 1724 DRM_DEBUG_KMS("pipe %d off delay\n",
1725 pipe);
1694 break; 1726 break;
1695 } 1727 }
1696 } 1728 }
1697 } else 1729 } else
1698 DRM_DEBUG("crtc %d is disabled\n", pipe); 1730 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1699 1731
1700 if (HAS_eDP) { 1732 udelay(100);
1701 igdng_disable_pll_edp(crtc); 1733
1734 /* Disable PF */
1735 temp = I915_READ(pf_ctl_reg);
1736 if ((temp & PF_ENABLE) != 0) {
1737 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1738 I915_READ(pf_ctl_reg);
1702 } 1739 }
1740 I915_WRITE(pf_win_size, 0);
1703 1741
1704 /* disable CPU FDI tx and PCH FDI rx */ 1742 /* disable CPU FDI tx and PCH FDI rx */
1705 temp = I915_READ(fdi_tx_reg); 1743 temp = I915_READ(fdi_tx_reg);
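Editor's note: two behavioural changes hide in this DPMS_OFF hunk: i915_disable_vga() now runs after the display plane is shut down rather than before it, and the panel fitter is disabled here, between pipe shutdown and FDI shutdown, instead of at the very end of the sequence (the old PF block is deleted further down). Every step uses the same idiom: read the control register, clear the enable bit only if it is set, then read the register back so the posted write is flushed before the delay that follows. A compilable toy version of that idiom, with fake registers and invented names:

#include <stdio.h>

static unsigned int regs[8];
#define DSPCNTR              0
#define PF_CTL               1
#define DISPLAY_PLANE_ENABLE (1u << 31)
#define PF_ENABLE            (1u << 31)
#define I915_READ(r)         (regs[(r)])
#define I915_WRITE(r, v)     (regs[(r)] = (v))

/* Clear an enable bit only if it is set, then read the register back.
 * On real hardware the readback flushes the posted MMIO write before
 * the udelay() that follows; here it is just the shape of the idiom. */
static void disable_bit(int reg, unsigned int bit, const char *name)
{
	unsigned int temp = I915_READ(reg);

	if (temp & bit) {
		I915_WRITE(reg, temp & ~bit);
		(void)I915_READ(reg);   /* posting read */
		printf("%s disabled\n", name);
	}
}

int main(void)
{
	regs[DSPCNTR] = DISPLAY_PLANE_ENABLE;
	regs[PF_CTL] = PF_ENABLE;

	/* The patch's DPMS-off order: plane first, then VGA, pipe and the
	 * panel fitter -- each step using this same read/clear/readback. */
	disable_bit(DSPCNTR, DISPLAY_PLANE_ENABLE, "plane");
	disable_bit(PF_CTL, PF_ENABLE, "panel fitter");
	return 0;
}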
@@ -1725,6 +1763,13 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1725 1763
1726 udelay(100); 1764 udelay(100);
1727 1765
1766 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
1767 temp = I915_READ(PCH_LVDS);
1768 I915_WRITE(PCH_LVDS, temp & ~LVDS_PORT_EN);
1769 I915_READ(PCH_LVDS);
1770 udelay(100);
1771 }
1772
1728 /* disable PCH transcoder */ 1773 /* disable PCH transcoder */
1729 temp = I915_READ(transconf_reg); 1774 temp = I915_READ(transconf_reg);
1730 if ((temp & TRANS_ENABLE) != 0) { 1775 if ((temp & TRANS_ENABLE) != 0) {
@@ -1738,12 +1783,15 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1738 udelay(500); 1783 udelay(500);
1739 continue; 1784 continue;
1740 } else { 1785 } else {
1741 DRM_DEBUG("transcoder %d off delay\n", pipe); 1786 DRM_DEBUG_KMS("transcoder %d off "
1787 "delay\n", pipe);
1742 break; 1788 break;
1743 } 1789 }
1744 } 1790 }
1745 } 1791 }
1746 1792
1793 udelay(100);
1794
1747 /* disable PCH DPLL */ 1795 /* disable PCH DPLL */
1748 temp = I915_READ(pch_dpll_reg); 1796 temp = I915_READ(pch_dpll_reg);
1749 if ((temp & DPLL_VCO_ENABLE) != 0) { 1797 if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -1751,14 +1799,20 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1751 I915_READ(pch_dpll_reg); 1799 I915_READ(pch_dpll_reg);
1752 } 1800 }
1753 1801
1754 temp = I915_READ(fdi_rx_reg); 1802 if (HAS_eDP) {
1755 if ((temp & FDI_RX_PLL_ENABLE) != 0) { 1803 ironlake_disable_pll_edp(crtc);
1756 temp &= ~FDI_SEL_PCDCLK;
1757 temp &= ~FDI_RX_PLL_ENABLE;
1758 I915_WRITE(fdi_rx_reg, temp);
1759 I915_READ(fdi_rx_reg);
1760 } 1804 }
1761 1805
1806 temp = I915_READ(fdi_rx_reg);
1807 temp &= ~FDI_SEL_PCDCLK;
1808 I915_WRITE(fdi_rx_reg, temp);
1809 I915_READ(fdi_rx_reg);
1810
1811 temp = I915_READ(fdi_rx_reg);
1812 temp &= ~FDI_RX_PLL_ENABLE;
1813 I915_WRITE(fdi_rx_reg, temp);
1814 I915_READ(fdi_rx_reg);
1815
1762 /* Disable CPU FDI TX PLL */ 1816 /* Disable CPU FDI TX PLL */
1763 temp = I915_READ(fdi_tx_reg); 1817 temp = I915_READ(fdi_tx_reg);
1764 if ((temp & FDI_TX_PLL_ENABLE) != 0) { 1818 if ((temp & FDI_TX_PLL_ENABLE) != 0) {
@@ -1767,20 +1821,43 @@ static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
1767 udelay(100); 1821 udelay(100);
1768 } 1822 }
1769 1823
1770 /* Disable PF */
1771 temp = I915_READ(pf_ctl_reg);
1772 if ((temp & PF_ENABLE) != 0) {
1773 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1774 I915_READ(pf_ctl_reg);
1775 }
1776 I915_WRITE(pf_win_size, 0);
1777
1778 /* Wait for the clocks to turn off. */ 1824 /* Wait for the clocks to turn off. */
1779 udelay(150); 1825 udelay(100);
1780 break; 1826 break;
1781 } 1827 }
1782} 1828}
1783 1829
1830static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
1831{
1832 struct intel_overlay *overlay;
1833 int ret;
1834
1835 if (!enable && intel_crtc->overlay) {
1836 overlay = intel_crtc->overlay;
1837 mutex_lock(&overlay->dev->struct_mutex);
1838 for (;;) {
1839 ret = intel_overlay_switch_off(overlay);
1840 if (ret == 0)
1841 break;
1842
1843 ret = intel_overlay_recover_from_interrupt(overlay, 0);
1844 if (ret != 0) {
1845 /* overlay doesn't react anymore. Usually
1846 * results in a black screen and an unkillable
1847 * X server. */
1848 BUG();
1849 overlay->hw_wedged = HW_WEDGED;
1850 break;
1851 }
1852 }
1853 mutex_unlock(&overlay->dev->struct_mutex);
1854 }
1855 /* Let userspace switch the overlay on again. In most cases userspace
1856 * has to recompute where to put it anyway. */
1857
1858 return;
1859}
1860
1784static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) 1861static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1785{ 1862{
1786 struct drm_device *dev = crtc->dev; 1863 struct drm_device *dev = crtc->dev;
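Editor's note: the new intel_crtc_dpms_overlay() replaces the old intel_crtc_dpms_video() TODOs below. Its interesting part is the switch-off loop: intel_overlay_switch_off() can fail while the hardware is mid-flip, in which case the driver calls intel_overlay_recover_from_interrupt() and retries; if even recovery fails it BUG()s, since a wedged overlay typically leaves a black screen and an unkillable X server. (As committed, the two statements after BUG() are unreachable.) The sketch below models that retry loop in userspace; the toy_overlay struct, error codes, and helpers are illustrative only.

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the overlay state: switch_off() failing with -EAGAIN
 * models "hardware still busy", recover() models
 * intel_overlay_recover_from_interrupt(). Names are illustrative,
 * not the driver's API. */
struct toy_overlay {
	pthread_mutex_t lock;
	int busy;
};

static int switch_off(struct toy_overlay *o)
{
	if (o->busy) {
		o->busy = 0;    /* pretend the pending flip completes now */
		return -11;     /* -EAGAIN: caller must recover and retry */
	}
	return 0;
}

static int recover(struct toy_overlay *o)
{
	(void)o;
	return 0;               /* recovered; retry switch_off() */
}

int main(void)
{
	struct toy_overlay ov = { PTHREAD_MUTEX_INITIALIZER, 1 };
	int ret;

	pthread_mutex_lock(&ov.lock);
	for (;;) {
		ret = switch_off(&ov);
		if (ret == 0)
			break;
		if (recover(&ov) != 0)
			break;  /* the driver BUG()s here: wedged hardware */
	}
	pthread_mutex_unlock(&ov.lock);
	printf("overlay off: %d\n", ret);
	return 0;
}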
@@ -1839,12 +1916,14 @@ static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
1839 intel_update_fbc(crtc, &crtc->mode); 1916 intel_update_fbc(crtc, &crtc->mode);
1840 1917
1841 /* Give the overlay scaler a chance to enable if it's on this pipe */ 1918 /* Give the overlay scaler a chance to enable if it's on this pipe */
1842 //intel_crtc_dpms_video(crtc, true); TODO 1919 intel_crtc_dpms_overlay(intel_crtc, true);
1843 break; 1920 break;
1844 case DRM_MODE_DPMS_OFF: 1921 case DRM_MODE_DPMS_OFF:
1845 intel_update_watermarks(dev); 1922 intel_update_watermarks(dev);
1923
1846 /* Give the overlay scaler a chance to disable if it's on this pipe */ 1924 /* Give the overlay scaler a chance to disable if it's on this pipe */
1847 //intel_crtc_dpms_video(crtc, FALSE); TODO 1925 intel_crtc_dpms_overlay(intel_crtc, false);
1926 drm_vblank_off(dev, pipe);
1848 1927
1849 if (dev_priv->cfb_plane == plane && 1928 if (dev_priv->cfb_plane == plane &&
1850 dev_priv->display.disable_fbc) 1929 dev_priv->display.disable_fbc)
@@ -1963,7 +2042,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
1963 struct drm_display_mode *adjusted_mode) 2042 struct drm_display_mode *adjusted_mode)
1964{ 2043{
1965 struct drm_device *dev = crtc->dev; 2044 struct drm_device *dev = crtc->dev;
1966 if (IS_IGDNG(dev)) { 2045 if (IS_IRONLAKE(dev)) {
1967 /* FDI link clock is fixed at 2.7G */ 2046 /* FDI link clock is fixed at 2.7G */
1968 if (mode->clock * 3 > 27000 * 4) 2047 if (mode->clock * 3 > 27000 * 4)
1969 return MODE_CLOCK_HIGH; 2048 return MODE_CLOCK_HIGH;
@@ -2039,7 +2118,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
2039 * Return the pipe currently connected to the panel fitter, 2118 * Return the pipe currently connected to the panel fitter,
2040 * or -1 if the panel fitter is not present or not in use 2119 * or -1 if the panel fitter is not present or not in use
2041 */ 2120 */
2042static int intel_panel_fitter_pipe (struct drm_device *dev) 2121int intel_panel_fitter_pipe (struct drm_device *dev)
2043{ 2122{
2044 struct drm_i915_private *dev_priv = dev->dev_private; 2123 struct drm_i915_private *dev_priv = dev->dev_private;
2045 u32 pfit_control; 2124 u32 pfit_control;
@@ -2083,9 +2162,8 @@ fdi_reduce_ratio(u32 *num, u32 *den)
2083#define LINK_N 0x80000 2162#define LINK_N 0x80000
2084 2163
2085static void 2164static void
2086igdng_compute_m_n(int bits_per_pixel, int nlanes, 2165ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
2087 int pixel_clock, int link_clock, 2166 int link_clock, struct fdi_m_n *m_n)
2088 struct fdi_m_n *m_n)
2089{ 2167{
2090 u64 temp; 2168 u64 temp;
2091 2169
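Editor's note: ironlake_compute_m_n() (renamed from igdng_compute_m_n) turns the pixel stream's bandwidth needs into the M/N divider pairs the FDI link hardware wants. The driver scales against the DATA_N/LINK_N constants defined just above and divides with div_u64(); the standalone sketch below computes the mathematically equivalent ratios directly and reuses the 24-bit halving trick of fdi_reduce_ratio(). The mode numbers are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Keep both terms of a ratio inside 24 bits by halving, the same
 * trick as the driver's fdi_reduce_ratio(). */
static void reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

int main(void)
{
	/* Hypothetical mode: 24 bpp, 4 FDI lanes, 148500 kHz pixel clock,
	 * 270000 kHz link clock (the fixed 2.7G FDI link / 10). */
	uint32_t bpp = 24, lanes = 4, pixel_clock = 148500, link_clock = 270000;

	/* data M/N: payload bits per clock vs. total link capacity */
	uint32_t data_m = bpp * pixel_clock;
	uint32_t data_n = link_clock * lanes * 8;
	reduce_ratio(&data_m, &data_n);

	/* link M/N: pixel clock vs. link clock */
	uint32_t link_m = pixel_clock;
	uint32_t link_n = link_clock;
	reduce_ratio(&link_m, &link_n);

	printf("data M/N = %u/%u, link M/N = %u/%u\n",
	       data_m, data_n, link_m, link_n);
	return 0;
}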
@@ -2113,34 +2191,34 @@ struct intel_watermark_params {
2113 unsigned long cacheline_size; 2191 unsigned long cacheline_size;
2114}; 2192};
2115 2193
2116/* IGD has different values for various configs */ 2194/* Pineview has different values for various configs */
2117static struct intel_watermark_params igd_display_wm = { 2195static struct intel_watermark_params pineview_display_wm = {
2118 IGD_DISPLAY_FIFO, 2196 PINEVIEW_DISPLAY_FIFO,
2119 IGD_MAX_WM, 2197 PINEVIEW_MAX_WM,
2120 IGD_DFT_WM, 2198 PINEVIEW_DFT_WM,
2121 IGD_GUARD_WM, 2199 PINEVIEW_GUARD_WM,
2122 IGD_FIFO_LINE_SIZE 2200 PINEVIEW_FIFO_LINE_SIZE
2123}; 2201};
2124static struct intel_watermark_params igd_display_hplloff_wm = { 2202static struct intel_watermark_params pineview_display_hplloff_wm = {
2125 IGD_DISPLAY_FIFO, 2203 PINEVIEW_DISPLAY_FIFO,
2126 IGD_MAX_WM, 2204 PINEVIEW_MAX_WM,
2127 IGD_DFT_HPLLOFF_WM, 2205 PINEVIEW_DFT_HPLLOFF_WM,
2128 IGD_GUARD_WM, 2206 PINEVIEW_GUARD_WM,
2129 IGD_FIFO_LINE_SIZE 2207 PINEVIEW_FIFO_LINE_SIZE
2130}; 2208};
2131static struct intel_watermark_params igd_cursor_wm = { 2209static struct intel_watermark_params pineview_cursor_wm = {
2132 IGD_CURSOR_FIFO, 2210 PINEVIEW_CURSOR_FIFO,
2133 IGD_CURSOR_MAX_WM, 2211 PINEVIEW_CURSOR_MAX_WM,
2134 IGD_CURSOR_DFT_WM, 2212 PINEVIEW_CURSOR_DFT_WM,
2135 IGD_CURSOR_GUARD_WM, 2213 PINEVIEW_CURSOR_GUARD_WM,
2136 IGD_FIFO_LINE_SIZE, 2214 PINEVIEW_FIFO_LINE_SIZE,
2137}; 2215};
2138static struct intel_watermark_params igd_cursor_hplloff_wm = { 2216static struct intel_watermark_params pineview_cursor_hplloff_wm = {
2139 IGD_CURSOR_FIFO, 2217 PINEVIEW_CURSOR_FIFO,
2140 IGD_CURSOR_MAX_WM, 2218 PINEVIEW_CURSOR_MAX_WM,
2141 IGD_CURSOR_DFT_WM, 2219 PINEVIEW_CURSOR_DFT_WM,
2142 IGD_CURSOR_GUARD_WM, 2220 PINEVIEW_CURSOR_GUARD_WM,
2143 IGD_FIFO_LINE_SIZE 2221 PINEVIEW_FIFO_LINE_SIZE
2144}; 2222};
2145static struct intel_watermark_params g4x_wm_info = { 2223static struct intel_watermark_params g4x_wm_info = {
2146 G4X_FIFO_SIZE, 2224 G4X_FIFO_SIZE,
@@ -2213,11 +2291,11 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
2213 1000; 2291 1000;
2214 entries_required /= wm->cacheline_size; 2292 entries_required /= wm->cacheline_size;
2215 2293
2216 DRM_DEBUG("FIFO entries required for mode: %d\n", entries_required); 2294 DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
2217 2295
2218 wm_size = wm->fifo_size - (entries_required + wm->guard_size); 2296 wm_size = wm->fifo_size - (entries_required + wm->guard_size);
2219 2297
2220 DRM_DEBUG("FIFO watermark level: %d\n", wm_size); 2298 DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
2221 2299
2222 /* Don't promote wm_size to unsigned... */ 2300 /* Don't promote wm_size to unsigned... */
2223 if (wm_size > (long)wm->max_wm) 2301 if (wm_size > (long)wm->max_wm)
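Editor's note: intel_calculate_wm() itself is unchanged here apart from the DRM_DEBUG_KMS conversion; for reference, this is the arithmetic it performs: bytes fetched during the latency window, converted to FIFO cachelines, subtracted (plus a guard band) from the FIFO size, then clamped. The parameters below are made up, not a real chip's intel_watermark_params table.

#include <stdio.h>

/* Mirror of the patch's watermark arithmetic, with invented parameters. */
struct wm_params {
	long fifo_size, max_wm, default_wm, guard_size, cacheline_size;
};

static long calculate_wm(unsigned long clock_khz, const struct wm_params *wm,
			 int pixel_size, unsigned long latency_ns)
{
	long entries, wm_size;

	/* bytes fetched during the latency window, in FIFO cachelines */
	entries = ((clock_khz / 1000) * pixel_size * latency_ns) / 1000;
	entries /= wm->cacheline_size;
	printf("FIFO entries required for mode: %ld\n", entries);

	wm_size = wm->fifo_size - (entries + wm->guard_size);
	printf("FIFO watermark level: %ld\n", wm_size);

	if (wm_size > wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;   /* never let it underflow */
	return wm_size;
}

int main(void)
{
	struct wm_params wm = { 96, 64, 1, 2, 64 };  /* hypothetical chip */

	printf("wm = %ld\n", calculate_wm(148500, &wm, 4, 400));
	return 0;
}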
@@ -2279,50 +2357,50 @@ static struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, int fsb,
2279 return latency; 2357 return latency;
2280 } 2358 }
2281 2359
2282 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2360 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2283 2361
2284 return NULL; 2362 return NULL;
2285} 2363}
2286 2364
2287static void igd_disable_cxsr(struct drm_device *dev) 2365static void pineview_disable_cxsr(struct drm_device *dev)
2288{ 2366{
2289 struct drm_i915_private *dev_priv = dev->dev_private; 2367 struct drm_i915_private *dev_priv = dev->dev_private;
2290 u32 reg; 2368 u32 reg;
2291 2369
2292 /* deactivate cxsr */ 2370 /* deactivate cxsr */
2293 reg = I915_READ(DSPFW3); 2371 reg = I915_READ(DSPFW3);
2294 reg &= ~(IGD_SELF_REFRESH_EN); 2372 reg &= ~(PINEVIEW_SELF_REFRESH_EN);
2295 I915_WRITE(DSPFW3, reg); 2373 I915_WRITE(DSPFW3, reg);
2296 DRM_INFO("Big FIFO is disabled\n"); 2374 DRM_INFO("Big FIFO is disabled\n");
2297} 2375}
2298 2376
2299static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock, 2377static void pineview_enable_cxsr(struct drm_device *dev, unsigned long clock,
2300 int pixel_size) 2378 int pixel_size)
2301{ 2379{
2302 struct drm_i915_private *dev_priv = dev->dev_private; 2380 struct drm_i915_private *dev_priv = dev->dev_private;
2303 u32 reg; 2381 u32 reg;
2304 unsigned long wm; 2382 unsigned long wm;
2305 struct cxsr_latency *latency; 2383 struct cxsr_latency *latency;
2306 2384
2307 latency = intel_get_cxsr_latency(IS_IGDG(dev), dev_priv->fsb_freq, 2385 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->fsb_freq,
2308 dev_priv->mem_freq); 2386 dev_priv->mem_freq);
2309 if (!latency) { 2387 if (!latency) {
2310 DRM_DEBUG("Unknown FSB/MEM found, disable CxSR\n"); 2388 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
2311 igd_disable_cxsr(dev); 2389 pineview_disable_cxsr(dev);
2312 return; 2390 return;
2313 } 2391 }
2314 2392
2315 /* Display SR */ 2393 /* Display SR */
2316 wm = intel_calculate_wm(clock, &igd_display_wm, pixel_size, 2394 wm = intel_calculate_wm(clock, &pineview_display_wm, pixel_size,
2317 latency->display_sr); 2395 latency->display_sr);
2318 reg = I915_READ(DSPFW1); 2396 reg = I915_READ(DSPFW1);
2319 reg &= 0x7fffff; 2397 reg &= 0x7fffff;
2320 reg |= wm << 23; 2398 reg |= wm << 23;
2321 I915_WRITE(DSPFW1, reg); 2399 I915_WRITE(DSPFW1, reg);
2322 DRM_DEBUG("DSPFW1 register is %x\n", reg); 2400 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
2323 2401
2324 /* cursor SR */ 2402 /* cursor SR */
2325 wm = intel_calculate_wm(clock, &igd_cursor_wm, pixel_size, 2403 wm = intel_calculate_wm(clock, &pineview_cursor_wm, pixel_size,
2326 latency->cursor_sr); 2404 latency->cursor_sr);
2327 reg = I915_READ(DSPFW3); 2405 reg = I915_READ(DSPFW3);
2328 reg &= ~(0x3f << 24); 2406 reg &= ~(0x3f << 24);
@@ -2330,7 +2408,7 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2330 I915_WRITE(DSPFW3, reg); 2408 I915_WRITE(DSPFW3, reg);
2331 2409
2332 /* Display HPLL off SR */ 2410 /* Display HPLL off SR */
2333 wm = intel_calculate_wm(clock, &igd_display_hplloff_wm, 2411 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
2334 latency->display_hpll_disable, I915_FIFO_LINE_SIZE); 2412 latency->display_hpll_disable, I915_FIFO_LINE_SIZE);
2335 reg = I915_READ(DSPFW3); 2413 reg = I915_READ(DSPFW3);
2336 reg &= 0xfffffe00; 2414 reg &= 0xfffffe00;
@@ -2338,17 +2416,17 @@ static void igd_enable_cxsr(struct drm_device *dev, unsigned long clock,
2338 I915_WRITE(DSPFW3, reg); 2416 I915_WRITE(DSPFW3, reg);
2339 2417
2340 /* cursor HPLL off SR */ 2418 /* cursor HPLL off SR */
2341 wm = intel_calculate_wm(clock, &igd_cursor_hplloff_wm, pixel_size, 2419 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, pixel_size,
2342 latency->cursor_hpll_disable); 2420 latency->cursor_hpll_disable);
2343 reg = I915_READ(DSPFW3); 2421 reg = I915_READ(DSPFW3);
2344 reg &= ~(0x3f << 16); 2422 reg &= ~(0x3f << 16);
2345 reg |= (wm & 0x3f) << 16; 2423 reg |= (wm & 0x3f) << 16;
2346 I915_WRITE(DSPFW3, reg); 2424 I915_WRITE(DSPFW3, reg);
2347 DRM_DEBUG("DSPFW3 register is %x\n", reg); 2425 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
2348 2426
2349 /* activate cxsr */ 2427 /* activate cxsr */
2350 reg = I915_READ(DSPFW3); 2428 reg = I915_READ(DSPFW3);
2351 reg |= IGD_SELF_REFRESH_EN; 2429 reg |= PINEVIEW_SELF_REFRESH_EN;
2352 I915_WRITE(DSPFW3, reg); 2430 I915_WRITE(DSPFW3, reg);
2353 2431
2354 DRM_INFO("Big FIFO is enabled\n"); 2432 DRM_INFO("Big FIFO is enabled\n");
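Editor's note: pineview_enable_cxsr() programs four self-refresh watermarks by read-modify-writing packed fields in DSPFW1/DSPFW3, then sets PINEVIEW_SELF_REFRESH_EN. The field positions used below (display SR above bit 23 of DSPFW1, cursor SR at bits 29:24 of DSPFW3) are read off the masks in the hunk; treat them as illustrative.

#include <stdint.h>
#include <stdio.h>

/* Pack watermarks with a mask-then-or read-modify-write so the
 * neighbouring fields in the register survive, as the patch does. */
int main(void)
{
	uint32_t dspfw1 = 0x12345678, dspfw3 = 0xdeadbeef;
	uint32_t display_wm = 0x55, cursor_wm = 0x2a;

	dspfw1 &= 0x7fffff;           /* keep bits 22:0 */
	dspfw1 |= display_wm << 23;   /* install the display SR watermark */

	dspfw3 &= ~(0x3fu << 24);     /* clear the cursor SR field */
	dspfw3 |= (cursor_wm & 0x3f) << 24;

	printf("DSPFW1=0x%08x DSPFW3=0x%08x\n", dspfw1, dspfw3);
	return 0;
}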
@@ -2384,8 +2462,8 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
2384 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - 2462 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
2385 (dsparb & 0x7f); 2463 (dsparb & 0x7f);
2386 2464
2387 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2465 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2388 size); 2466 plane ? "B" : "A", size);
2389 2467
2390 return size; 2468 return size;
2391} 2469}
@@ -2403,8 +2481,8 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
2403 (dsparb & 0x1ff); 2481 (dsparb & 0x1ff);
2404 size >>= 1; /* Convert to cachelines */ 2482 size >>= 1; /* Convert to cachelines */
2405 2483
2406 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2484 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2407 size); 2485 plane ? "B" : "A", size);
2408 2486
2409 return size; 2487 return size;
2410} 2488}
@@ -2418,7 +2496,8 @@ static int i845_get_fifo_size(struct drm_device *dev, int plane)
2418 size = dsparb & 0x7f; 2496 size = dsparb & 0x7f;
2419 size >>= 2; /* Convert to cachelines */ 2497 size >>= 2; /* Convert to cachelines */
2420 2498
2421 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2499 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2500 plane ? "B" : "A",
2422 size); 2501 size);
2423 2502
2424 return size; 2503 return size;
@@ -2433,8 +2512,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
2433 size = dsparb & 0x7f; 2512 size = dsparb & 0x7f;
2434 size >>= 1; /* Convert to cachelines */ 2513 size >>= 1; /* Convert to cachelines */
2435 2514
2436 DRM_DEBUG("FIFO size - (0x%08x) %s: %d\n", dsparb, plane ? "B" : "A", 2515 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
2437 size); 2516 plane ? "B" : "A", size);
2438 2517
2439 return size; 2518 return size;
2440} 2519}
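Editor's note: the get_fifo_size() helpers above all decode DSPARB, which packs the FIFO split points for planes A and B into one register; only their logging changes in this patch. Below is a runnable decode of the i9xx variant; the DSPARB_CSTART_SHIFT value of 7 is an assumption for the demo, the real constant lives in i915_reg.h.

#include <stdint.h>
#include <stdio.h>

#define DSPARB_CSTART_SHIFT 7   /* assumed for the demo */

int main(void)
{
	uint32_t dsparb = (95u << DSPARB_CSTART_SHIFT) | 48u; /* toy split */

	/* plane A ends at bits 6:0; plane B runs from there to the
	 * cursor-start field, so its size is the difference */
	int size_a = dsparb & 0x7f;
	int size_b = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size_a;

	printf("FIFO size - (0x%08x) A: %d\n", dsparb, size_a);
	printf("FIFO size - (0x%08x) B: %d\n", dsparb, size_b);
	return 0;
}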
@@ -2509,15 +2588,39 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
2509 (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); 2588 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
2510} 2589}
2511 2590
2512static void i965_update_wm(struct drm_device *dev, int unused, int unused2, 2591static void i965_update_wm(struct drm_device *dev, int planea_clock,
2513 int unused3, int unused4) 2592 int planeb_clock, int sr_hdisplay, int pixel_size)
2514{ 2593{
2515 struct drm_i915_private *dev_priv = dev->dev_private; 2594 struct drm_i915_private *dev_priv = dev->dev_private;
2595 unsigned long line_time_us;
2596 int sr_clock, sr_entries, srwm = 1;
2597
2598 /* Calc sr entries for one plane configs */
2599 if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
2600 /* self-refresh has much higher latency */
 2601 static const int sr_latency_ns = 12000;
2602
2603 sr_clock = planea_clock ? planea_clock : planeb_clock;
2604 line_time_us = ((sr_hdisplay * 1000) / sr_clock);
2605
2606 /* Use ns/us then divide to preserve precision */
2607 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2608 pixel_size * sr_hdisplay) / 1000;
2609 sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
2610 DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
2611 srwm = I945_FIFO_SIZE - sr_entries;
2612 if (srwm < 0)
2613 srwm = 1;
2614 srwm &= 0x3f;
2615 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
2616 }
2516 2617
2517 DRM_DEBUG("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR 8\n"); 2618 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
2619 srwm);
2518 2620
2519 /* 965 has limitations... */ 2621 /* 965 has limitations... */
2520 I915_WRITE(DSPFW1, (8 << 16) | (8 << 8) | (8 << 0)); 2622 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
2623 (8 << 0));
2521 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); 2624 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
2522} 2625}
2523 2626
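Editor's note: the rewritten i965_update_wm() gains a real self-refresh calculation instead of the old hardcoded "SR 8". The math: microseconds per scanline at the single active pipe's clock, then how many FIFO lines the display drains while the (assumed 12 us) self-refresh exit latency elapses, subtracted from the FIFO size. Two quirks worth flagging: roundup(sr_entries / I915_FIFO_LINE_SIZE, 1) truncates rather than rounds up, since roundup to a multiple of 1 is a no-op (the sketch below rounds up instead), and the watermark is computed against I945_FIFO_SIZE even though this is the 965 path. All numbers in the sketch are hypothetical.

#include <stdio.h>

int main(void)
{
	/* Hypothetical single-pipe mode; clocks in kHz, latency in ns. */
	int sr_hdisplay = 1680, pixel_size = 4, sr_clock = 119000;
	static const int sr_latency_ns = 12000;
	static const int fifo_line_size = 64, fifo_size = 127;

	/* microseconds to scan one line, then how many FIFO lines the
	 * display consumes while the self-refresh exit latency elapses */
	long line_time_us = (sr_hdisplay * 1000L) / sr_clock;
	long sr_entries = (((sr_latency_ns / line_time_us) + 1) *
			   pixel_size * sr_hdisplay) / 1000;
	long srwm;

	sr_entries = (sr_entries + fifo_line_size - 1) / fifo_line_size;
	srwm = fifo_size - sr_entries;
	if (srwm < 0)
		srwm = 1;
	printf("self-refresh entries: %ld, SR watermark: %ld\n",
	       sr_entries, srwm & 0x3f);
	return 0;
}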
@@ -2553,7 +2656,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2553 pixel_size, latency_ns); 2656 pixel_size, latency_ns);
2554 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params, 2657 planeb_wm = intel_calculate_wm(planeb_clock, &planeb_params,
2555 pixel_size, latency_ns); 2658 pixel_size, latency_ns);
2556 DRM_DEBUG("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); 2659 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
2557 2660
2558 /* 2661 /*
2559 * Overlay gets an aggressive default since video jitter is bad. 2662 * Overlay gets an aggressive default since video jitter is bad.
@@ -2573,14 +2676,14 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
2573 sr_entries = (((sr_latency_ns / line_time_us) + 1) * 2676 sr_entries = (((sr_latency_ns / line_time_us) + 1) *
2574 pixel_size * sr_hdisplay) / 1000; 2677 pixel_size * sr_hdisplay) / 1000;
2575 sr_entries = roundup(sr_entries / cacheline_size, 1); 2678 sr_entries = roundup(sr_entries / cacheline_size, 1);
2576 DRM_DEBUG("self-refresh entries: %d\n", sr_entries); 2679 DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
2577 srwm = total_size - sr_entries; 2680 srwm = total_size - sr_entries;
2578 if (srwm < 0) 2681 if (srwm < 0)
2579 srwm = 1; 2682 srwm = 1;
2580 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f)); 2683 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
2581 } 2684 }
2582 2685
2583 DRM_DEBUG("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", 2686 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
2584 planea_wm, planeb_wm, cwm, srwm); 2687 planea_wm, planeb_wm, cwm, srwm);
2585 2688
2586 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); 2689 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
@@ -2607,7 +2710,7 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
2607 pixel_size, latency_ns); 2710 pixel_size, latency_ns);
2608 fwater_lo |= (3<<8) | planea_wm; 2711 fwater_lo |= (3<<8) | planea_wm;
2609 2712
2610 DRM_DEBUG("Setting FIFO watermarks - A: %d\n", planea_wm); 2713 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
2611 2714
2612 I915_WRITE(FW_BLC, fwater_lo); 2715 I915_WRITE(FW_BLC, fwater_lo);
2613} 2716}
@@ -2661,11 +2764,11 @@ static void intel_update_watermarks(struct drm_device *dev)
2661 if (crtc->enabled) { 2764 if (crtc->enabled) {
2662 enabled++; 2765 enabled++;
2663 if (intel_crtc->plane == 0) { 2766 if (intel_crtc->plane == 0) {
2664 DRM_DEBUG("plane A (pipe %d) clock: %d\n", 2767 DRM_DEBUG_KMS("plane A (pipe %d) clock: %d\n",
2665 intel_crtc->pipe, crtc->mode.clock); 2768 intel_crtc->pipe, crtc->mode.clock);
2666 planea_clock = crtc->mode.clock; 2769 planea_clock = crtc->mode.clock;
2667 } else { 2770 } else {
2668 DRM_DEBUG("plane B (pipe %d) clock: %d\n", 2771 DRM_DEBUG_KMS("plane B (pipe %d) clock: %d\n",
2669 intel_crtc->pipe, crtc->mode.clock); 2772 intel_crtc->pipe, crtc->mode.clock);
2670 planeb_clock = crtc->mode.clock; 2773 planeb_clock = crtc->mode.clock;
2671 } 2774 }
@@ -2682,10 +2785,10 @@ static void intel_update_watermarks(struct drm_device *dev)
2682 return; 2785 return;
2683 2786
2684 /* Single plane configs can enable self refresh */ 2787 /* Single plane configs can enable self refresh */
2685 if (enabled == 1 && IS_IGD(dev)) 2788 if (enabled == 1 && IS_PINEVIEW(dev))
2686 igd_enable_cxsr(dev, sr_clock, pixel_size); 2789 pineview_enable_cxsr(dev, sr_clock, pixel_size);
2687 else if (IS_IGD(dev)) 2790 else if (IS_PINEVIEW(dev))
2688 igd_disable_cxsr(dev); 2791 pineview_disable_cxsr(dev);
2689 2792
2690 dev_priv->display.update_wm(dev, planea_clock, planeb_clock, 2793 dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
2691 sr_hdisplay, pixel_size); 2794 sr_hdisplay, pixel_size);
@@ -2779,10 +2882,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2779 2882
2780 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) { 2883 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
2781 refclk = dev_priv->lvds_ssc_freq * 1000; 2884 refclk = dev_priv->lvds_ssc_freq * 1000;
2782 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); 2885 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
2886 refclk / 1000);
2783 } else if (IS_I9XX(dev)) { 2887 } else if (IS_I9XX(dev)) {
2784 refclk = 96000; 2888 refclk = 96000;
2785 if (IS_IGDNG(dev)) 2889 if (IS_IRONLAKE(dev))
2786 refclk = 120000; /* 120Mhz refclk */ 2890 refclk = 120000; /* 120Mhz refclk */
2787 } else { 2891 } else {
2788 refclk = 48000; 2892 refclk = 48000;
@@ -2802,14 +2906,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2802 return -EINVAL; 2906 return -EINVAL;
2803 } 2907 }
2804 2908
2805 if (limit->find_reduced_pll && dev_priv->lvds_downclock_avail) { 2909 if (is_lvds && limit->find_reduced_pll &&
2910 dev_priv->lvds_downclock_avail) {
2806 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t)); 2911 memcpy(&reduced_clock, &clock, sizeof(intel_clock_t));
2807 has_reduced_clock = limit->find_reduced_pll(limit, crtc, 2912 has_reduced_clock = limit->find_reduced_pll(limit, crtc,
2808 (adjusted_mode->clock*3/4), 2913 dev_priv->lvds_downclock,
2809 refclk, 2914 refclk,
2810 &reduced_clock); 2915 &reduced_clock);
2916 if (has_reduced_clock && (clock.p != reduced_clock.p)) {
2917 /*
2918 * If the different P is found, it means that we can't
2919 * switch the display clock by using the FP0/FP1.
2920 * In such case we will disable the LVDS downclock
2921 * feature.
2922 */
2923 DRM_DEBUG_KMS("Different P is found for "
2924 "LVDS clock/downclock\n");
2925 has_reduced_clock = 0;
2926 }
2811 } 2927 }
2812
2813 /* SDVO TV has fixed PLL values depend on its clock range, 2928 /* SDVO TV has fixed PLL values depend on its clock range,
2814 this mirrors vbios setting. */ 2929 this mirrors vbios setting. */
2815 if (is_sdvo && is_tv) { 2930 if (is_sdvo && is_tv) {
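Editor's note: the reworked downclock block now keys off is_lvds and the VBT-derived dev_priv->lvds_downclock instead of a blanket clock*3/4, and it adds the P-divider check explained in the new comment: FP0/FP1 can only switch the N/M dividers at runtime, so a reduced clock that needs a different post divider P cannot be used. A toy version of that check, with intel_clock_t reduced to the fields involved:

#include <stdio.h>

struct toy_clock { int n, m1, m2, p; };

int main(void)
{
	struct toy_clock clock   = { 2, 10, 8, 14 };
	struct toy_clock reduced = { 2, 10, 6, 14 };   /* same P: usable */
	int has_reduced_clock = 1;

	/* If the best downclock needs a different post divider, LVDS
	 * downclocking must be abandoned, exactly as the patch does. */
	if (has_reduced_clock && clock.p != reduced.p) {
		printf("Different P is found for LVDS clock/downclock\n");
		has_reduced_clock = 0;
	}
	printf("downclock %s\n", has_reduced_clock ? "usable" : "disabled");
	return 0;
}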
@@ -2831,7 +2946,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2831 } 2946 }
2832 2947
2833 /* FDI link */ 2948 /* FDI link */
2834 if (IS_IGDNG(dev)) { 2949 if (IS_IRONLAKE(dev)) {
2835 int lane, link_bw, bpp; 2950 int lane, link_bw, bpp;
2836 /* eDP doesn't require FDI link, so just set DP M/N 2951 /* eDP doesn't require FDI link, so just set DP M/N
2837 according to current link config */ 2952 according to current link config */
@@ -2873,8 +2988,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2873 bpp = 24; 2988 bpp = 24;
2874 } 2989 }
2875 2990
2876 igdng_compute_m_n(bpp, lane, target_clock, 2991 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
2877 link_bw, &m_n);
2878 } 2992 }
2879 2993
2880 /* Ironlake: try to setup display ref clock before DPLL 2994 /* Ironlake: try to setup display ref clock before DPLL
@@ -2882,7 +2996,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2882 * PCH B stepping, previous chipset stepping should be 2996 * PCH B stepping, previous chipset stepping should be
2883 * ignoring this setting. 2997 * ignoring this setting.
2884 */ 2998 */
2885 if (IS_IGDNG(dev)) { 2999 if (IS_IRONLAKE(dev)) {
2886 temp = I915_READ(PCH_DREF_CONTROL); 3000 temp = I915_READ(PCH_DREF_CONTROL);
2887 /* Always enable nonspread source */ 3001 /* Always enable nonspread source */
2888 temp &= ~DREF_NONSPREAD_SOURCE_MASK; 3002 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -2917,7 +3031,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2917 } 3031 }
2918 } 3032 }
2919 3033
2920 if (IS_IGD(dev)) { 3034 if (IS_PINEVIEW(dev)) {
2921 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 3035 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
2922 if (has_reduced_clock) 3036 if (has_reduced_clock)
2923 fp2 = (1 << reduced_clock.n) << 16 | 3037 fp2 = (1 << reduced_clock.n) << 16 |
@@ -2929,7 +3043,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2929 reduced_clock.m2; 3043 reduced_clock.m2;
2930 } 3044 }
2931 3045
2932 if (!IS_IGDNG(dev)) 3046 if (!IS_IRONLAKE(dev))
2933 dpll = DPLL_VGA_MODE_DIS; 3047 dpll = DPLL_VGA_MODE_DIS;
2934 3048
2935 if (IS_I9XX(dev)) { 3049 if (IS_I9XX(dev)) {
@@ -2942,19 +3056,19 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2942 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3056 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
2943 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 3057 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
2944 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 3058 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
2945 else if (IS_IGDNG(dev)) 3059 else if (IS_IRONLAKE(dev))
2946 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 3060 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
2947 } 3061 }
2948 if (is_dp) 3062 if (is_dp)
2949 dpll |= DPLL_DVO_HIGH_SPEED; 3063 dpll |= DPLL_DVO_HIGH_SPEED;
2950 3064
2951 /* compute bitmask from p1 value */ 3065 /* compute bitmask from p1 value */
2952 if (IS_IGD(dev)) 3066 if (IS_PINEVIEW(dev))
2953 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; 3067 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
2954 else { 3068 else {
2955 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 3069 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
2956 /* also FPA1 */ 3070 /* also FPA1 */
2957 if (IS_IGDNG(dev)) 3071 if (IS_IRONLAKE(dev))
2958 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3072 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
2959 if (IS_G4X(dev) && has_reduced_clock) 3073 if (IS_G4X(dev) && has_reduced_clock)
2960 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 3074 dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -2973,7 +3087,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
2973 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 3087 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
2974 break; 3088 break;
2975 } 3089 }
2976 if (IS_I965G(dev) && !IS_IGDNG(dev)) 3090 if (IS_I965G(dev) && !IS_IRONLAKE(dev))
2977 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 3091 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
2978 } else { 3092 } else {
2979 if (is_lvds) { 3093 if (is_lvds) {
@@ -3005,9 +3119,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3005 /* Set up the display plane register */ 3119 /* Set up the display plane register */
3006 dspcntr = DISPPLANE_GAMMA_ENABLE; 3120 dspcntr = DISPPLANE_GAMMA_ENABLE;
3007 3121
3008 /* IGDNG's plane is forced to pipe, bit 24 is to 3122 /* Ironlake's plane is forced to pipe, bit 24 is to
3009 enable color space conversion */ 3123 enable color space conversion */
3010 if (!IS_IGDNG(dev)) { 3124 if (!IS_IRONLAKE(dev)) {
3011 if (pipe == 0) 3125 if (pipe == 0)
3012 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; 3126 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3013 else 3127 else
@@ -3034,20 +3148,20 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3034 3148
3035 3149
3036 /* Disable the panel fitter if it was on our pipe */ 3150 /* Disable the panel fitter if it was on our pipe */
3037 if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe) 3151 if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
3038 I915_WRITE(PFIT_CONTROL, 0); 3152 I915_WRITE(PFIT_CONTROL, 0);
3039 3153
3040 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 3154 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
3041 drm_mode_debug_printmodeline(mode); 3155 drm_mode_debug_printmodeline(mode);
3042 3156
3043 /* assign to IGDNG registers */ 3157 /* assign to Ironlake registers */
3044 if (IS_IGDNG(dev)) { 3158 if (IS_IRONLAKE(dev)) {
3045 fp_reg = pch_fp_reg; 3159 fp_reg = pch_fp_reg;
3046 dpll_reg = pch_dpll_reg; 3160 dpll_reg = pch_dpll_reg;
3047 } 3161 }
3048 3162
3049 if (is_edp) { 3163 if (is_edp) {
3050 igdng_disable_pll_edp(crtc); 3164 ironlake_disable_pll_edp(crtc);
3051 } else if ((dpll & DPLL_VCO_ENABLE)) { 3165 } else if ((dpll & DPLL_VCO_ENABLE)) {
3052 I915_WRITE(fp_reg, fp); 3166 I915_WRITE(fp_reg, fp);
3053 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); 3167 I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE);
@@ -3062,7 +3176,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3062 if (is_lvds) { 3176 if (is_lvds) {
3063 u32 lvds; 3177 u32 lvds;
3064 3178
3065 if (IS_IGDNG(dev)) 3179 if (IS_IRONLAKE(dev))
3066 lvds_reg = PCH_LVDS; 3180 lvds_reg = PCH_LVDS;
3067 3181
3068 lvds = I915_READ(lvds_reg); 3182 lvds = I915_READ(lvds_reg);
@@ -3095,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3095 /* Wait for the clocks to stabilize. */ 3209 /* Wait for the clocks to stabilize. */
3096 udelay(150); 3210 udelay(150);
3097 3211
3098 if (IS_I965G(dev) && !IS_IGDNG(dev)) { 3212 if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
3099 if (is_sdvo) { 3213 if (is_sdvo) {
3100 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 3214 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
3101 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 3215 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3115,14 +3229,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3115 I915_WRITE(fp_reg + 4, fp2); 3229 I915_WRITE(fp_reg + 4, fp2);
3116 intel_crtc->lowfreq_avail = true; 3230 intel_crtc->lowfreq_avail = true;
3117 if (HAS_PIPE_CXSR(dev)) { 3231 if (HAS_PIPE_CXSR(dev)) {
3118 DRM_DEBUG("enabling CxSR downclocking\n"); 3232 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
3119 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 3233 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
3120 } 3234 }
3121 } else { 3235 } else {
3122 I915_WRITE(fp_reg + 4, fp); 3236 I915_WRITE(fp_reg + 4, fp);
3123 intel_crtc->lowfreq_avail = false; 3237 intel_crtc->lowfreq_avail = false;
3124 if (HAS_PIPE_CXSR(dev)) { 3238 if (HAS_PIPE_CXSR(dev)) {
3125 DRM_DEBUG("disabling CxSR downclocking\n"); 3239 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
3126 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; 3240 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
3127 } 3241 }
3128 } 3242 }
@@ -3142,21 +3256,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3142 /* pipesrc and dspsize control the size that is scaled from, which should 3256 /* pipesrc and dspsize control the size that is scaled from, which should
3143 * always be the user's requested size. 3257 * always be the user's requested size.
3144 */ 3258 */
3145 if (!IS_IGDNG(dev)) { 3259 if (!IS_IRONLAKE(dev)) {
3146 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | 3260 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
3147 (mode->hdisplay - 1)); 3261 (mode->hdisplay - 1));
3148 I915_WRITE(dsppos_reg, 0); 3262 I915_WRITE(dsppos_reg, 0);
3149 } 3263 }
3150 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 3264 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
3151 3265
3152 if (IS_IGDNG(dev)) { 3266 if (IS_IRONLAKE(dev)) {
3153 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m); 3267 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
3154 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n); 3268 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
3155 I915_WRITE(link_m1_reg, m_n.link_m); 3269 I915_WRITE(link_m1_reg, m_n.link_m);
3156 I915_WRITE(link_n1_reg, m_n.link_n); 3270 I915_WRITE(link_n1_reg, m_n.link_n);
3157 3271
3158 if (is_edp) { 3272 if (is_edp) {
3159 igdng_set_pll_edp(crtc, adjusted_mode->clock); 3273 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
3160 } else { 3274 } else {
3161 /* enable FDI RX PLL too */ 3275 /* enable FDI RX PLL too */
3162 temp = I915_READ(fdi_rx_reg); 3276 temp = I915_READ(fdi_rx_reg);
@@ -3170,7 +3284,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3170 3284
3171 intel_wait_for_vblank(dev); 3285 intel_wait_for_vblank(dev);
3172 3286
3173 if (IS_IGDNG(dev)) { 3287 if (IS_IRONLAKE(dev)) {
3174 /* enable address swizzle for tiling buffer */ 3288 /* enable address swizzle for tiling buffer */
3175 temp = I915_READ(DISP_ARB_CTL); 3289 temp = I915_READ(DISP_ARB_CTL);
3176 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); 3290 I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING);
@@ -3204,8 +3318,8 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
3204 if (!crtc->enabled) 3318 if (!crtc->enabled)
3205 return; 3319 return;
3206 3320
3207 /* use legacy palette for IGDNG */ 3321 /* use legacy palette for Ironlake */
3208 if (IS_IGDNG(dev)) 3322 if (IS_IRONLAKE(dev))
3209 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A : 3323 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
3210 LGC_PALETTE_B; 3324 LGC_PALETTE_B;
3211 3325
@@ -3234,11 +3348,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
3234 size_t addr; 3348 size_t addr;
3235 int ret; 3349 int ret;
3236 3350
3237 DRM_DEBUG("\n"); 3351 DRM_DEBUG_KMS("\n");
3238 3352
3239 /* if we want to turn off the cursor ignore width and height */ 3353 /* if we want to turn off the cursor ignore width and height */
3240 if (!handle) { 3354 if (!handle) {
3241 DRM_DEBUG("cursor off\n"); 3355 DRM_DEBUG_KMS("cursor off\n");
3242 if (IS_MOBILE(dev) || IS_I9XX(dev)) { 3356 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
3243 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 3357 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
3244 temp |= CURSOR_MODE_DISABLE; 3358 temp |= CURSOR_MODE_DISABLE;
@@ -3546,18 +3660,18 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
3546 fp = I915_READ((pipe == 0) ? FPA1 : FPB1); 3660 fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
3547 3661
3548 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 3662 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
3549 if (IS_IGD(dev)) { 3663 if (IS_PINEVIEW(dev)) {
3550 clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 3664 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
3551 clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT; 3665 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
3552 } else { 3666 } else {
3553 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 3667 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
3554 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 3668 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
3555 } 3669 }
3556 3670
3557 if (IS_I9XX(dev)) { 3671 if (IS_I9XX(dev)) {
3558 if (IS_IGD(dev)) 3672 if (IS_PINEVIEW(dev))
3559 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >> 3673 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
3560 DPLL_FPA01_P1_POST_DIV_SHIFT_IGD); 3674 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
3561 else 3675 else
3562 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 3676 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
3563 DPLL_FPA01_P1_POST_DIV_SHIFT); 3677 DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -3572,7 +3686,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
3572 7 : 14; 3686 7 : 14;
3573 break; 3687 break;
3574 default: 3688 default:
3575 DRM_DEBUG("Unknown DPLL mode %08x in programmed " 3689 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
3576 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 3690 "mode\n", (int)(dpll & DPLL_MODE_MASK));
3577 return 0; 3691 return 0;
3578 } 3692 }
@@ -3658,7 +3772,7 @@ static void intel_gpu_idle_timer(unsigned long arg)
3658 struct drm_device *dev = (struct drm_device *)arg; 3772 struct drm_device *dev = (struct drm_device *)arg;
3659 drm_i915_private_t *dev_priv = dev->dev_private; 3773 drm_i915_private_t *dev_priv = dev->dev_private;
3660 3774
3661 DRM_DEBUG("idle timer fired, downclocking\n"); 3775 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
3662 3776
3663 dev_priv->busy = false; 3777 dev_priv->busy = false;
3664 3778
@@ -3669,11 +3783,11 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3669{ 3783{
3670 drm_i915_private_t *dev_priv = dev->dev_private; 3784 drm_i915_private_t *dev_priv = dev->dev_private;
3671 3785
3672 if (IS_IGDNG(dev)) 3786 if (IS_IRONLAKE(dev))
3673 return; 3787 return;
3674 3788
3675 if (!dev_priv->render_reclock_avail) { 3789 if (!dev_priv->render_reclock_avail) {
3676 DRM_DEBUG("not reclocking render clock\n"); 3790 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3677 return; 3791 return;
3678 } 3792 }
3679 3793
@@ -3682,7 +3796,7 @@ void intel_increase_renderclock(struct drm_device *dev, bool schedule)
3682 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock); 3796 pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
3683 else if (IS_I85X(dev)) 3797 else if (IS_I85X(dev))
3684 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock); 3798 pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
3685 DRM_DEBUG("increasing render clock frequency\n"); 3799 DRM_DEBUG_DRIVER("increasing render clock frequency\n");
3686 3800
3687 /* Schedule downclock */ 3801 /* Schedule downclock */
3688 if (schedule) 3802 if (schedule)
@@ -3694,11 +3808,11 @@ void intel_decrease_renderclock(struct drm_device *dev)
3694{ 3808{
3695 drm_i915_private_t *dev_priv = dev->dev_private; 3809 drm_i915_private_t *dev_priv = dev->dev_private;
3696 3810
3697 if (IS_IGDNG(dev)) 3811 if (IS_IRONLAKE(dev))
3698 return; 3812 return;
3699 3813
3700 if (!dev_priv->render_reclock_avail) { 3814 if (!dev_priv->render_reclock_avail) {
3701 DRM_DEBUG("not reclocking render clock\n"); 3815 DRM_DEBUG_DRIVER("not reclocking render clock\n");
3702 return; 3816 return;
3703 } 3817 }
3704 3818
@@ -3758,7 +3872,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
3758 3872
3759 pci_write_config_word(dev->pdev, HPLLCC, hpllcc); 3873 pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
3760 } 3874 }
3761 DRM_DEBUG("decreasing render clock frequency\n"); 3875 DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
3762} 3876}
3763 3877
3764/* Note that no increase function is needed for this - increase_renderclock() 3878/* Note that no increase function is needed for this - increase_renderclock()
@@ -3766,7 +3880,7 @@ void intel_decrease_renderclock(struct drm_device *dev)
3766 */ 3880 */
3767void intel_decrease_displayclock(struct drm_device *dev) 3881void intel_decrease_displayclock(struct drm_device *dev)
3768{ 3882{
3769 if (IS_IGDNG(dev)) 3883 if (IS_IRONLAKE(dev))
3770 return; 3884 return;
3771 3885
3772 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) || 3886 if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
@@ -3792,7 +3906,7 @@ static void intel_crtc_idle_timer(unsigned long arg)
3792 struct drm_crtc *crtc = &intel_crtc->base; 3906 struct drm_crtc *crtc = &intel_crtc->base;
3793 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 3907 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
3794 3908
3795 DRM_DEBUG("idle timer fired, downclocking\n"); 3909 DRM_DEBUG_DRIVER("idle timer fired, downclocking\n");
3796 3910
3797 intel_crtc->busy = false; 3911 intel_crtc->busy = false;
3798 3912
@@ -3808,14 +3922,14 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3808 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3922 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3809 int dpll = I915_READ(dpll_reg); 3923 int dpll = I915_READ(dpll_reg);
3810 3924
3811 if (IS_IGDNG(dev)) 3925 if (IS_IRONLAKE(dev))
3812 return; 3926 return;
3813 3927
3814 if (!dev_priv->lvds_downclock_avail) 3928 if (!dev_priv->lvds_downclock_avail)
3815 return; 3929 return;
3816 3930
3817 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { 3931 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
3818 DRM_DEBUG("upclocking LVDS\n"); 3932 DRM_DEBUG_DRIVER("upclocking LVDS\n");
3819 3933
3820 /* Unlock panel regs */ 3934 /* Unlock panel regs */
3821 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 3935 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3826,7 +3940,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
3826 intel_wait_for_vblank(dev); 3940 intel_wait_for_vblank(dev);
3827 dpll = I915_READ(dpll_reg); 3941 dpll = I915_READ(dpll_reg);
3828 if (dpll & DISPLAY_RATE_SELECT_FPA1) 3942 if (dpll & DISPLAY_RATE_SELECT_FPA1)
3829 DRM_DEBUG("failed to upclock LVDS!\n"); 3943 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
3830 3944
3831 /* ...and lock them again */ 3945 /* ...and lock them again */
3832 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 3946 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3847,7 +3961,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3847 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; 3961 int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
3848 int dpll = I915_READ(dpll_reg); 3962 int dpll = I915_READ(dpll_reg);
3849 3963
3850 if (IS_IGDNG(dev)) 3964 if (IS_IRONLAKE(dev))
3851 return; 3965 return;
3852 3966
3853 if (!dev_priv->lvds_downclock_avail) 3967 if (!dev_priv->lvds_downclock_avail)
@@ -3858,7 +3972,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3858 * the manual case. 3972 * the manual case.
3859 */ 3973 */
3860 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 3974 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
3861 DRM_DEBUG("downclocking LVDS\n"); 3975 DRM_DEBUG_DRIVER("downclocking LVDS\n");
3862 3976
3863 /* Unlock panel regs */ 3977 /* Unlock panel regs */
3864 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16)); 3978 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | (0xabcd << 16));
@@ -3869,7 +3983,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
3869 intel_wait_for_vblank(dev); 3983 intel_wait_for_vblank(dev);
3870 dpll = I915_READ(dpll_reg); 3984 dpll = I915_READ(dpll_reg);
3871 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 3985 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
3872 DRM_DEBUG("failed to downclock LVDS!\n"); 3986 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
3873 3987
3874 /* ...and lock them again */ 3988 /* ...and lock them again */
3875 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); 3989 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3);
@@ -3936,8 +4050,13 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
3936 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4050 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3937 return; 4051 return;
3938 4052
3939 dev_priv->busy = true; 4053 if (!dev_priv->busy) {
3940 intel_increase_renderclock(dev, true); 4054 dev_priv->busy = true;
4055 intel_increase_renderclock(dev, true);
4056 } else {
4057 mod_timer(&dev_priv->idle_timer, jiffies +
4058 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
4059 }
3941 4060
3942 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4061 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3943 if (!crtc->fb) 4062 if (!crtc->fb)
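Editor's note: intel_mark_busy() used to upclock unconditionally; now it upclocks only on the idle-to-busy transition and otherwise just pushes the GPU idle timer out by GPU_IDLE_TIMEOUT, a classic debounce. The model below fakes jiffies and mod_timer() to show the two branches; the names mirror the patch but the timer machinery is a stand-in.

#include <stdio.h>

/* Toy jiffies clock and one-shot timer; mod_timer() here only records
 * the new expiry, standing in for the kernel API of the same name. */
static unsigned long jiffies;
static unsigned long idle_timer_expires;
static int busy;

#define GPU_IDLE_TIMEOUT 500

static void mod_timer(unsigned long *expires, unsigned long when)
{
	*expires = when;
}

static void mark_busy(void)
{
	if (!busy) {
		busy = 1;       /* first activity: raise the render clock */
		printf("upclocking at %lu\n", jiffies);
	} else {
		/* already busy: just push the idle deadline out again */
		mod_timer(&idle_timer_expires, jiffies + GPU_IDLE_TIMEOUT);
	}
}

int main(void)
{
	for (jiffies = 0; jiffies < 3000; jiffies += 400)
		mark_busy();
	printf("idle timer armed for %lu\n", idle_timer_expires);
	return 0;
}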
@@ -3967,6 +4086,158 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
3967 kfree(intel_crtc); 4086 kfree(intel_crtc);
3968} 4087}
3969 4088
4089struct intel_unpin_work {
4090 struct work_struct work;
4091 struct drm_device *dev;
4092 struct drm_gem_object *obj;
4093 struct drm_pending_vblank_event *event;
4094 int pending;
4095};
4096
4097static void intel_unpin_work_fn(struct work_struct *__work)
4098{
4099 struct intel_unpin_work *work =
4100 container_of(__work, struct intel_unpin_work, work);
4101
4102 mutex_lock(&work->dev->struct_mutex);
4103 i915_gem_object_unpin(work->obj);
4104 drm_gem_object_unreference(work->obj);
4105 mutex_unlock(&work->dev->struct_mutex);
4106 kfree(work);
4107}
4108
4109void intel_finish_page_flip(struct drm_device *dev, int pipe)
4110{
4111 drm_i915_private_t *dev_priv = dev->dev_private;
4112 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
4113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4114 struct intel_unpin_work *work;
4115 struct drm_i915_gem_object *obj_priv;
4116 struct drm_pending_vblank_event *e;
4117 struct timeval now;
4118 unsigned long flags;
4119
4120 /* Ignore early vblank irqs */
4121 if (intel_crtc == NULL)
4122 return;
4123
4124 spin_lock_irqsave(&dev->event_lock, flags);
4125 work = intel_crtc->unpin_work;
4126 if (work == NULL || !work->pending) {
4127 spin_unlock_irqrestore(&dev->event_lock, flags);
4128 return;
4129 }
4130
4131 intel_crtc->unpin_work = NULL;
4132 drm_vblank_put(dev, intel_crtc->pipe);
4133
4134 if (work->event) {
4135 e = work->event;
4136 do_gettimeofday(&now);
4137 e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
4138 e->event.tv_sec = now.tv_sec;
4139 e->event.tv_usec = now.tv_usec;
4140 list_add_tail(&e->base.link,
4141 &e->base.file_priv->event_list);
4142 wake_up_interruptible(&e->base.file_priv->event_wait);
4143 }
4144
4145 spin_unlock_irqrestore(&dev->event_lock, flags);
4146
4147 obj_priv = work->obj->driver_private;
4148 if (atomic_dec_and_test(&obj_priv->pending_flip))
4149 DRM_WAKEUP(&dev_priv->pending_flip_queue);
4150 schedule_work(&work->work);
4151}
4152
4153void intel_prepare_page_flip(struct drm_device *dev, int plane)
4154{
4155 drm_i915_private_t *dev_priv = dev->dev_private;
4156 struct intel_crtc *intel_crtc =
4157 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
4158 unsigned long flags;
4159
4160 spin_lock_irqsave(&dev->event_lock, flags);
4161 if (intel_crtc->unpin_work)
4162 intel_crtc->unpin_work->pending = 1;
4163 spin_unlock_irqrestore(&dev->event_lock, flags);
4164}
4165
4166static int intel_crtc_page_flip(struct drm_crtc *crtc,
4167 struct drm_framebuffer *fb,
4168 struct drm_pending_vblank_event *event)
4169{
4170 struct drm_device *dev = crtc->dev;
4171 struct drm_i915_private *dev_priv = dev->dev_private;
4172 struct intel_framebuffer *intel_fb;
4173 struct drm_i915_gem_object *obj_priv;
4174 struct drm_gem_object *obj;
4175 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4176 struct intel_unpin_work *work;
4177 unsigned long flags;
4178 int ret;
4179 RING_LOCALS;
4180
4181 work = kzalloc(sizeof *work, GFP_KERNEL);
4182 if (work == NULL)
4183 return -ENOMEM;
4184
4185 mutex_lock(&dev->struct_mutex);
4186
4187 work->event = event;
4188 work->dev = crtc->dev;
4189 intel_fb = to_intel_framebuffer(crtc->fb);
4190 work->obj = intel_fb->obj;
4191 INIT_WORK(&work->work, intel_unpin_work_fn);
4192
4193 /* We borrow the event spin lock for protecting unpin_work */
4194 spin_lock_irqsave(&dev->event_lock, flags);
4195 if (intel_crtc->unpin_work) {
4196 spin_unlock_irqrestore(&dev->event_lock, flags);
4197 kfree(work);
4198 mutex_unlock(&dev->struct_mutex);
4199 return -EBUSY;
4200 }
4201 intel_crtc->unpin_work = work;
4202 spin_unlock_irqrestore(&dev->event_lock, flags);
4203
4204 intel_fb = to_intel_framebuffer(fb);
4205 obj = intel_fb->obj;
4206
4207 ret = intel_pin_and_fence_fb_obj(dev, obj);
4208 if (ret != 0) {
4209 kfree(work);
4210 mutex_unlock(&dev->struct_mutex);
4211 return ret;
4212 }
4213
4214 /* Reference the old fb object for the scheduled work. */
4215 drm_gem_object_reference(work->obj);
4216
4217 crtc->fb = fb;
4218 i915_gem_object_flush_write_domain(obj);
4219 drm_vblank_get(dev, intel_crtc->pipe);
4220 obj_priv = obj->driver_private;
4221 atomic_inc(&obj_priv->pending_flip);
4222
4223 BEGIN_LP_RING(4);
4224 OUT_RING(MI_DISPLAY_FLIP |
4225 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
4226 OUT_RING(fb->pitch);
4227 if (IS_I965G(dev)) {
4228 OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
4229 OUT_RING((fb->width << 16) | fb->height);
4230 } else {
4231 OUT_RING(obj_priv->gtt_offset);
4232 OUT_RING(MI_NOOP);
4233 }
4234 ADVANCE_LP_RING();
4235
4236 mutex_unlock(&dev->struct_mutex);
4237
4238 return 0;
4239}
4240
3970static const struct drm_crtc_helper_funcs intel_helper_funcs = { 4241static const struct drm_crtc_helper_funcs intel_helper_funcs = {
3971 .dpms = intel_crtc_dpms, 4242 .dpms = intel_crtc_dpms,
3972 .mode_fixup = intel_crtc_mode_fixup, 4243 .mode_fixup = intel_crtc_mode_fixup,
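Editor's note: the page-flip path added above enforces one outstanding flip per CRTC: intel_crtc->unpin_work is set under dev->event_lock, a second request while it is non-NULL fails with -EBUSY, and intel_finish_page_flip() clears it from the vblank interrupt and schedules the deferred unpin of the old framebuffer. A userspace model of that gate, with pthread locking standing in for the spinlock and invented helper names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define EBUSY_RET (-16)

/* unpin_work plays the role of intel_crtc->unpin_work: non-NULL means
 * a flip is already queued. The mutex stands in for dev->event_lock. */
static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static void *unpin_work;

static int queue_page_flip(void)
{
	void *work = malloc(16);      /* the deferred-unpin record */

	pthread_mutex_lock(&event_lock);
	if (unpin_work) {
		pthread_mutex_unlock(&event_lock);
		free(work);
		return EBUSY_RET;     /* previous flip not completed yet */
	}
	unpin_work = work;
	pthread_mutex_unlock(&event_lock);
	return 0;
}

static void finish_page_flip(void)
{
	pthread_mutex_lock(&event_lock);
	free(unpin_work);             /* the driver schedules a work item */
	unpin_work = NULL;
	pthread_mutex_unlock(&event_lock);
}

int main(void)
{
	printf("first flip: %d\n", queue_page_flip());   /* 0 */
	printf("second flip: %d\n", queue_page_flip());  /* -16 */
	finish_page_flip();
	printf("third flip: %d\n", queue_page_flip());   /* 0 */
	return 0;
}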
@@ -3983,11 +4254,13 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
3983 .gamma_set = intel_crtc_gamma_set, 4254 .gamma_set = intel_crtc_gamma_set,
3984 .set_config = drm_crtc_helper_set_config, 4255 .set_config = drm_crtc_helper_set_config,
3985 .destroy = intel_crtc_destroy, 4256 .destroy = intel_crtc_destroy,
4257 .page_flip = intel_crtc_page_flip,
3986}; 4258};
3987 4259
3988 4260
3989static void intel_crtc_init(struct drm_device *dev, int pipe) 4261static void intel_crtc_init(struct drm_device *dev, int pipe)
3990{ 4262{
4263 drm_i915_private_t *dev_priv = dev->dev_private;
3991 struct intel_crtc *intel_crtc; 4264 struct intel_crtc *intel_crtc;
3992 int i; 4265 int i;
3993 4266
@@ -4010,10 +4283,15 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
4010 intel_crtc->pipe = pipe; 4283 intel_crtc->pipe = pipe;
4011 intel_crtc->plane = pipe; 4284 intel_crtc->plane = pipe;
4012 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) { 4285 if (IS_MOBILE(dev) && (IS_I9XX(dev) && !IS_I965G(dev))) {
4013 DRM_DEBUG("swapping pipes & planes for FBC\n"); 4286 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
4014 intel_crtc->plane = ((pipe == 0) ? 1 : 0); 4287 intel_crtc->plane = ((pipe == 0) ? 1 : 0);
4015 } 4288 }
4016 4289
4290 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
4291 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
4292 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
4293 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
4294
4017 intel_crtc->cursor_addr = 0; 4295 intel_crtc->cursor_addr = 0;
4018 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF; 4296 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4019 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 4297 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
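Editor's note: intel_crtc_init() now records both lookup tables the flip code needs, pipe_to_crtc_mapping and plane_to_crtc_mapping, and BUG()s on double registration (remember the plane may be swapped relative to the pipe on mobile pre-965 parts for FBC). A compact model, using assert() where the driver uses BUG_ON():

#include <assert.h>
#include <stdio.h>

struct crtc { int pipe, plane; };

#define MAX_PIPES 2
static struct crtc *plane_to_crtc[MAX_PIPES];
static struct crtc *pipe_to_crtc[MAX_PIPES];

/* Register a crtc in both lookup tables, refusing double registration
 * the way the patch's BUG_ON does. */
static void crtc_init(struct crtc *c, int pipe, int swap_for_fbc)
{
	c->pipe = pipe;
	c->plane = swap_for_fbc ? !pipe : pipe;

	assert(pipe < MAX_PIPES && plane_to_crtc[c->plane] == NULL);
	plane_to_crtc[c->plane] = c;
	pipe_to_crtc[c->pipe] = c;
}

int main(void)
{
	struct crtc a, b;

	crtc_init(&a, 0, 1);   /* mobile pre-965: plane/pipe swapped */
	crtc_init(&b, 1, 1);
	printf("pipe 0 -> plane %d, pipe 1 -> plane %d\n", a.plane, b.plane);
	return 0;
}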
@@ -4090,7 +4368,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4090 if (IS_MOBILE(dev) && !IS_I830(dev)) 4368 if (IS_MOBILE(dev) && !IS_I830(dev))
4091 intel_lvds_init(dev); 4369 intel_lvds_init(dev);
4092 4370
4093 if (IS_IGDNG(dev)) { 4371 if (IS_IRONLAKE(dev)) {
4094 int found; 4372 int found;
4095 4373
4096 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED)) 4374 if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4118,7 +4396,7 @@ static void intel_setup_outputs(struct drm_device *dev)
4118 if (I915_READ(PCH_DP_D) & DP_DETECTED) 4396 if (I915_READ(PCH_DP_D) & DP_DETECTED)
4119 intel_dp_init(dev, PCH_DP_D); 4397 intel_dp_init(dev, PCH_DP_D);
4120 4398
4121 } else if (IS_I9XX(dev)) { 4399 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
4122 bool found = false; 4400 bool found = false;
4123 4401
4124 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4402 if (I915_READ(SDVOB) & SDVO_DETECTED) {
@@ -4145,10 +4423,10 @@ static void intel_setup_outputs(struct drm_device *dev)
4145 4423
4146 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED)) 4424 if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
4147 intel_dp_init(dev, DP_D); 4425 intel_dp_init(dev, DP_D);
4148 } else 4426 } else if (IS_I8XX(dev))
4149 intel_dvo_init(dev); 4427 intel_dvo_init(dev);
4150 4428
4151 if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev)) 4429 if (SUPPORTS_TV(dev))
4152 intel_tv_init(dev); 4430 intel_tv_init(dev);
4153 4431
4154 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 4432 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -4257,7 +4535,7 @@ void intel_init_clock_gating(struct drm_device *dev)
4257 * Disable clock gating reported to work incorrectly according to the 4535 * Disable clock gating reported to work incorrectly according to the
4258 * specs, but enable as much else as we can. 4536 * specs, but enable as much else as we can.
4259 */ 4537 */
4260 if (IS_IGDNG(dev)) { 4538 if (IS_IRONLAKE(dev)) {
4261 return; 4539 return;
4262 } else if (IS_G4X(dev)) { 4540 } else if (IS_G4X(dev)) {
4263 uint32_t dspclk_gate; 4541 uint32_t dspclk_gate;
@@ -4291,11 +4569,52 @@ void intel_init_clock_gating(struct drm_device *dev)
4291 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 4569 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
4292 DSTATE_DOT_CLOCK_GATING; 4570 DSTATE_DOT_CLOCK_GATING;
4293 I915_WRITE(D_STATE, dstate); 4571 I915_WRITE(D_STATE, dstate);
4294 } else if (IS_I855(dev) || IS_I865G(dev)) { 4572 } else if (IS_I85X(dev) || IS_I865G(dev)) {
4295 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 4573 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
4296 } else if (IS_I830(dev)) { 4574 } else if (IS_I830(dev)) {
4297 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 4575 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
4298 } 4576 }
4577
4578 /*
4579 * GPU can automatically power down the render unit if given a page
4580 * to save state.
4581 */
4582 if (I915_HAS_RC6(dev)) {
4583 struct drm_gem_object *pwrctx;
4584 struct drm_i915_gem_object *obj_priv;
4585 int ret;
4586
4587 if (dev_priv->pwrctx) {
4588 obj_priv = dev_priv->pwrctx->driver_private;
4589 } else {
4590 pwrctx = drm_gem_object_alloc(dev, 4096);
4591 if (!pwrctx) {
4592 DRM_DEBUG("failed to alloc power context, "
4593 "RC6 disabled\n");
4594 goto out;
4595 }
4596
4597 ret = i915_gem_object_pin(pwrctx, 4096);
4598 if (ret) {
4599 DRM_ERROR("failed to pin power context: %d\n",
4600 ret);
4601 drm_gem_object_unreference(pwrctx);
4602 goto out;
4603 }
4604
4605 i915_gem_object_set_to_gtt_domain(pwrctx, 1);
4606
4607 dev_priv->pwrctx = pwrctx;
4608 obj_priv = pwrctx->driver_private;
4609 }
4610
4611 I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
4612 I915_WRITE(MCHBAR_RENDER_STANDBY,
4613 I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
4614 }
4615
4616out:
4617 return;
4299} 4618}
4300 4619
4301/* Set up chip specific display functions */ 4620/* Set up chip specific display functions */
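Editor's note: the RC6 block above allocates a 4 KiB GEM object for the render unit's power-save state, pins it, and points PWRCTXA at it, caching the object in dev_priv->pwrctx so repeated intel_init_clock_gating() calls (e.g. across resume) reuse it. The sketch below keeps only that allocate-once shape; alloc_gem() is a toy stand-in for drm_gem_object_alloc() plus i915_gem_object_pin(), and no MMIO is modelled.

#include <stdio.h>
#include <stdlib.h>

/* Cached power-save context, like dev_priv->pwrctx in the patch. */
static void *pwrctx;

static void *alloc_gem(size_t size)
{
	return calloc(1, size);
}

static int enable_rc6(void)
{
	if (!pwrctx) {
		pwrctx = alloc_gem(4096);
		if (!pwrctx) {
			printf("failed to alloc power context, RC6 disabled\n");
			return -1;
		}
	}
	/* the driver now writes the context address | PWRCTX_EN into
	 * PWRCTXA and clears RCX_SW_EXIT in MCHBAR_RENDER_STANDBY */
	printf("RC6 enabled with context %p\n", pwrctx);
	return 0;
}

int main(void)
{
	enable_rc6();
	enable_rc6();   /* second call reuses the cached context */
	return 0;
}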
@@ -4304,8 +4623,8 @@ static void intel_init_display(struct drm_device *dev)
4304 struct drm_i915_private *dev_priv = dev->dev_private; 4623 struct drm_i915_private *dev_priv = dev->dev_private;
4305 4624
4306 /* We always want a DPMS function */ 4625 /* We always want a DPMS function */
4307 if (IS_IGDNG(dev)) 4626 if (IS_IRONLAKE(dev))
4308 dev_priv->display.dpms = igdng_crtc_dpms; 4627 dev_priv->display.dpms = ironlake_crtc_dpms;
4309 else 4628 else
4310 dev_priv->display.dpms = i9xx_crtc_dpms; 4629 dev_priv->display.dpms = i9xx_crtc_dpms;
4311 4630
@@ -4324,13 +4643,13 @@ static void intel_init_display(struct drm_device *dev)
4324 } 4643 }
4325 4644
4326 /* Returns the core display clock speed */ 4645 /* Returns the core display clock speed */
4327 if (IS_I945G(dev)) 4646 if (IS_I945G(dev) || (IS_G33(dev) && ! IS_PINEVIEW_M(dev)))
4328 dev_priv->display.get_display_clock_speed = 4647 dev_priv->display.get_display_clock_speed =
4329 i945_get_display_clock_speed; 4648 i945_get_display_clock_speed;
4330 else if (IS_I915G(dev)) 4649 else if (IS_I915G(dev))
4331 dev_priv->display.get_display_clock_speed = 4650 dev_priv->display.get_display_clock_speed =
4332 i915_get_display_clock_speed; 4651 i915_get_display_clock_speed;
4333 else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev)) 4652 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
4334 dev_priv->display.get_display_clock_speed = 4653 dev_priv->display.get_display_clock_speed =
4335 i9xx_misc_get_display_clock_speed; 4654 i9xx_misc_get_display_clock_speed;
4336 else if (IS_I915GM(dev)) 4655 else if (IS_I915GM(dev))
@@ -4339,7 +4658,7 @@ static void intel_init_display(struct drm_device *dev)
4339 else if (IS_I865G(dev)) 4658 else if (IS_I865G(dev))
4340 dev_priv->display.get_display_clock_speed = 4659 dev_priv->display.get_display_clock_speed =
4341 i865_get_display_clock_speed; 4660 i865_get_display_clock_speed;
4342 else if (IS_I855(dev)) 4661 else if (IS_I85X(dev))
4343 dev_priv->display.get_display_clock_speed = 4662 dev_priv->display.get_display_clock_speed =
4344 i855_get_display_clock_speed; 4663 i855_get_display_clock_speed;
4345 else /* 852, 830 */ 4664 else /* 852, 830 */
@@ -4347,7 +4666,7 @@ static void intel_init_display(struct drm_device *dev)
4347 i830_get_display_clock_speed; 4666 i830_get_display_clock_speed;
4348 4667
4349 /* For FIFO watermark updates */ 4668 /* For FIFO watermark updates */
4350 if (IS_IGDNG(dev)) 4669 if (IS_IRONLAKE(dev))
4351 dev_priv->display.update_wm = NULL; 4670 dev_priv->display.update_wm = NULL;
4352 else if (IS_G4X(dev)) 4671 else if (IS_G4X(dev))
4353 dev_priv->display.update_wm = g4x_update_wm; 4672 dev_priv->display.update_wm = g4x_update_wm;
@@ -4403,7 +4722,7 @@ void intel_modeset_init(struct drm_device *dev)
4403 num_pipe = 2; 4722 num_pipe = 2;
4404 else 4723 else
4405 num_pipe = 1; 4724 num_pipe = 1;
4406 DRM_DEBUG("%d display pipe%s available.\n", 4725 DRM_DEBUG_KMS("%d display pipe%s available.\n",
4407 num_pipe, num_pipe > 1 ? "s" : ""); 4726 num_pipe, num_pipe > 1 ? "s" : "");
4408 4727
4409 if (IS_I85X(dev)) 4728 if (IS_I85X(dev))
@@ -4422,6 +4741,15 @@ void intel_modeset_init(struct drm_device *dev)
4422 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 4741 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
4423 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 4742 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
4424 (unsigned long)dev); 4743 (unsigned long)dev);
4744
4745 intel_setup_overlay(dev);
4746
4747 if (IS_PINEVIEW(dev) && !intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
4748 dev_priv->fsb_freq,
4749 dev_priv->mem_freq))
4750 DRM_INFO("failed to find known CxSR latency "
4751 "(found fsb freq %d, mem freq %d), disabling CxSR\n",
4752 dev_priv->fsb_freq, dev_priv->mem_freq);
4425} 4753}
4426 4754
4427void intel_modeset_cleanup(struct drm_device *dev) 4755void intel_modeset_cleanup(struct drm_device *dev)
@@ -4445,11 +4773,21 @@ void intel_modeset_cleanup(struct drm_device *dev)
4445 intel_increase_renderclock(dev, false); 4773 intel_increase_renderclock(dev, false);
4446 del_timer_sync(&dev_priv->idle_timer); 4774 del_timer_sync(&dev_priv->idle_timer);
4447 4775
4448 mutex_unlock(&dev->struct_mutex);
4449
4450 if (dev_priv->display.disable_fbc) 4776 if (dev_priv->display.disable_fbc)
4451 dev_priv->display.disable_fbc(dev); 4777 dev_priv->display.disable_fbc(dev);
4452 4778
4779 if (dev_priv->pwrctx) {
4780 struct drm_i915_gem_object *obj_priv;
4781
4782 obj_priv = dev_priv->pwrctx->driver_private;
4783 I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
4784 I915_READ(PWRCTXA);
4785 i915_gem_object_unpin(dev_priv->pwrctx);
4786 drm_gem_object_unreference(dev_priv->pwrctx);
4787 }
4788
4789 mutex_unlock(&dev->struct_mutex);
4790
4453 drm_mode_config_cleanup(dev); 4791 drm_mode_config_cleanup(dev);
4454} 4792}
4455 4793
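Editor's note: the two hunks above are the two halves of one lifecycle. intel_init_clock_gating allocates and pins a 4KiB GEM object and points PWRCTXA at it so RC6 can save render state; intel_modeset_cleanup clears PWRCTX_EN and unpins/unreferences the same object before mode-config teardown (note the mutex_unlock now moves below that work). A minimal standalone sketch of the pairing; the names echo the patch, but the stub bodies are illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PWRCTX_EN (1u << 0)

struct fake_gem_object { uint32_t gtt_offset; int pinned; int refs; };
static uint32_t pwrctxa;	/* stands in for the PWRCTXA register */

static struct fake_gem_object *gem_alloc(size_t size)
{
	struct fake_gem_object *o = calloc(1, sizeof(*o));
	if (o) { o->gtt_offset = 0x1000; o->refs = 1; }
	(void)size;	/* a real allocator would use this */
	return o;
}
static int gem_pin(struct fake_gem_object *o) { o->pinned = 1; return 0; }
static void gem_unpin(struct fake_gem_object *o) { o->pinned = 0; }
static void gem_unref(struct fake_gem_object *o) { if (--o->refs == 0) free(o); }

int main(void)
{
	/* init: allocate + pin once, then enable RC6 via PWRCTXA */
	struct fake_gem_object *pwrctx = gem_alloc(4096);
	if (!pwrctx || gem_pin(pwrctx) != 0)
		return 1;	/* mirrors the "RC6 disabled" bail-out above */
	pwrctxa = pwrctx->gtt_offset | PWRCTX_EN;

	/* cleanup mirrors init: disable first, then unpin and drop the ref */
	pwrctxa = pwrctx->gtt_offset & ~PWRCTX_EN;
	gem_unpin(pwrctx);
	gem_unref(pwrctx);
	printf("PWRCTXA left at 0x%08x\n", (unsigned)pwrctxa);
	return 0;
}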
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d83447557f9b..4e7aa8b7b938 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -33,7 +33,8 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_drm.h" 34#include "i915_drm.h"
35#include "i915_drv.h" 35#include "i915_drv.h"
36#include "intel_dp.h" 36#include "drm_dp_helper.h"
37
37 38
38#define DP_LINK_STATUS_SIZE 6 39#define DP_LINK_STATUS_SIZE 6
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000) 40#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
223 */ 224 */
224 if (IS_eDP(intel_output)) 225 if (IS_eDP(intel_output))
225 aux_clock_divider = 225; /* eDP input clock at 450Mhz */ 226 aux_clock_divider = 225; /* eDP input clock at 450Mhz */
226 else if (IS_IGDNG(dev)) 227 else if (IS_IRONLAKE(dev))
227 aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */ 228 aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
228 else 229 else
229 aux_clock_divider = intel_hrawclk(dev) / 2; 230 aux_clock_divider = intel_hrawclk(dev) / 2;
230 231
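Editor's note: all three divider choices above land on roughly the same AUX bit clock, since input_clock / divider comes out near 2MHz (450/225 = 2 for eDP, 125/62 ≈ 2.02 on Ironlake, and hrawclk / (hrawclk_in_MHz / 2) = 2 otherwise). A quick standalone check of that arithmetic; the 2MHz target is inferred from the divider values here, and the 200MHz hrawclk row is a made-up example:

#include <stdio.h>

int main(void)
{
	/* input clock in MHz, divider taken from the hunk above */
	struct { const char *name; double clk_mhz; int div; } cases[] = {
		{ "eDP",      450.0, 225 },
		{ "Ironlake", 125.0,  62 },
		{ "hrawclk",  200.0, 100 },	/* example raw clock, divider = clk/2 */
	};
	for (int i = 0; i < 3; i++)
		printf("%-8s -> %.2f MHz AUX clock\n",
		       cases[i].name, cases[i].clk_mhz / cases[i].div);
	return 0;
}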
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
282 /* Timeouts occur when the device isn't connected, so they're 283 /* Timeouts occur when the device isn't connected, so they're
283 * "normal" -- don't fill the kernel log with these */ 284 * "normal" -- don't fill the kernel log with these */
284 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { 285 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
285 DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status); 286 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
286 return -ETIMEDOUT; 287 return -ETIMEDOUT;
287 } 288 }
288 289
@@ -382,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
382} 383}
383 384
384static int 385static int
385intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, 386intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
386 uint8_t *send, int send_bytes, 387 uint8_t write_byte, uint8_t *read_byte)
387 uint8_t *recv, int recv_bytes)
388{ 388{
389 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
389 struct intel_dp_priv *dp_priv = container_of(adapter, 390 struct intel_dp_priv *dp_priv = container_of(adapter,
390 struct intel_dp_priv, 391 struct intel_dp_priv,
391 adapter); 392 adapter);
392 struct intel_output *intel_output = dp_priv->intel_output; 393 struct intel_output *intel_output = dp_priv->intel_output;
394 uint16_t address = algo_data->address;
395 uint8_t msg[5];
396 uint8_t reply[2];
397 int msg_bytes;
398 int reply_bytes;
399 int ret;
400
401 /* Set up the command byte */
402 if (mode & MODE_I2C_READ)
403 msg[0] = AUX_I2C_READ << 4;
404 else
405 msg[0] = AUX_I2C_WRITE << 4;
406
407 if (!(mode & MODE_I2C_STOP))
408 msg[0] |= AUX_I2C_MOT << 4;
393 409
394 return intel_dp_aux_ch(intel_output, 410 msg[1] = address >> 8;
395 send, send_bytes, recv, recv_bytes); 411 msg[2] = address;
412
413 switch (mode) {
414 case MODE_I2C_WRITE:
415 msg[3] = 0;
416 msg[4] = write_byte;
417 msg_bytes = 5;
418 reply_bytes = 1;
419 break;
420 case MODE_I2C_READ:
421 msg[3] = 0;
422 msg_bytes = 4;
423 reply_bytes = 2;
424 break;
425 default:
426 msg_bytes = 3;
427 reply_bytes = 1;
428 break;
429 }
430
431 for (;;) {
432 ret = intel_dp_aux_ch(intel_output,
433 msg, msg_bytes,
434 reply, reply_bytes);
435 if (ret < 0) {
436 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
437 return ret;
438 }
439 switch (reply[0] & AUX_I2C_REPLY_MASK) {
440 case AUX_I2C_REPLY_ACK:
441 if (mode == MODE_I2C_READ) {
442 *read_byte = reply[1];
443 }
444 return reply_bytes - 1;
445 case AUX_I2C_REPLY_NACK:
446 DRM_DEBUG_KMS("aux_ch nack\n");
447 return -EREMOTEIO;
448 case AUX_I2C_REPLY_DEFER:
449 DRM_DEBUG_KMS("aux_ch defer\n");
450 udelay(100);
451 break;
452 default:
453 DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
454 return -EREMOTEIO;
455 }
456 }
396} 457}
397 458
398static int 459static int
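Editor's note: the rewritten helper above adapts single-byte i2c-algo transfers onto DP AUX. Byte 0 carries the four-bit command in its high nibble (AUX_I2C_READ or AUX_I2C_WRITE, plus AUX_I2C_MOT while the transaction stays open), bytes 1-2 the 16-bit I2C address, byte 3 the length field (0 meaning one data byte), and byte 4 the payload for writes. A standalone sketch of that packing; the command values match the drm_dp_helper.h of this era, but treat them as an assumption:

#include <stdint.h>
#include <stdio.h>

/* assumed values, as used by drm_dp_helper.h at this point in time */
#define AUX_I2C_WRITE 0x0
#define AUX_I2C_READ  0x1
#define AUX_I2C_MOT   0x4

static int pack_i2c_over_aux(uint8_t msg[5], int read, int stop,
			     uint16_t address, uint8_t write_byte)
{
	msg[0] = (read ? AUX_I2C_READ : AUX_I2C_WRITE) << 4;
	if (!stop)
		msg[0] |= AUX_I2C_MOT << 4;	/* middle-of-transaction */
	msg[1] = address >> 8;
	msg[2] = address;
	msg[3] = 0;				/* length field: 0 => one data byte */
	if (!read) {
		msg[4] = write_byte;
		return 5;			/* header + one payload byte */
	}
	return 4;				/* reads carry no payload */
}

int main(void)
{
	uint8_t msg[5];
	int n = pack_i2c_over_aux(msg, 1, 0, 0x50, 0);	/* EDID read, MOT set */
	for (int i = 0; i < n; i++)
		printf("%02x ", msg[i]);
	printf("\n");
	return 0;
}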
@@ -435,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
435 dp_priv->link_bw = bws[clock]; 496 dp_priv->link_bw = bws[clock];
436 dp_priv->lane_count = lane_count; 497 dp_priv->lane_count = lane_count;
437 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw); 498 adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
438 DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n", 499 DRM_DEBUG_KMS("Display port link bw %02x lane "
500 "count %d clock %d\n",
439 dp_priv->link_bw, dp_priv->lane_count, 501 dp_priv->link_bw, dp_priv->lane_count,
440 adjusted_mode->clock); 502 adjusted_mode->clock);
441 return true; 503 return true;
@@ -514,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
514 intel_dp_compute_m_n(3, lane_count, 576 intel_dp_compute_m_n(3, lane_count,
515 mode->clock, adjusted_mode->clock, &m_n); 577 mode->clock, adjusted_mode->clock, &m_n);
516 578
517 if (IS_IGDNG(dev)) { 579 if (IS_IRONLAKE(dev)) {
518 if (intel_crtc->pipe == 0) { 580 if (intel_crtc->pipe == 0) {
519 I915_WRITE(TRANSA_DATA_M1, 581 I915_WRITE(TRANSA_DATA_M1,
520 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 582 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
606 } 668 }
607} 669}
608 670
609static void igdng_edp_backlight_on (struct drm_device *dev) 671static void ironlake_edp_backlight_on (struct drm_device *dev)
610{ 672{
611 struct drm_i915_private *dev_priv = dev->dev_private; 673 struct drm_i915_private *dev_priv = dev->dev_private;
612 u32 pp; 674 u32 pp;
613 675
614 DRM_DEBUG("\n"); 676 DRM_DEBUG_KMS("\n");
615 pp = I915_READ(PCH_PP_CONTROL); 677 pp = I915_READ(PCH_PP_CONTROL);
616 pp |= EDP_BLC_ENABLE; 678 pp |= EDP_BLC_ENABLE;
617 I915_WRITE(PCH_PP_CONTROL, pp); 679 I915_WRITE(PCH_PP_CONTROL, pp);
618} 680}
619 681
620static void igdng_edp_backlight_off (struct drm_device *dev) 682static void ironlake_edp_backlight_off (struct drm_device *dev)
621{ 683{
622 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = dev->dev_private;
623 u32 pp; 685 u32 pp;
624 686
625 DRM_DEBUG("\n"); 687 DRM_DEBUG_KMS("\n");
626 pp = I915_READ(PCH_PP_CONTROL); 688 pp = I915_READ(PCH_PP_CONTROL);
627 pp &= ~EDP_BLC_ENABLE; 689 pp &= ~EDP_BLC_ENABLE;
628 I915_WRITE(PCH_PP_CONTROL, pp); 690 I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
641 if (dp_reg & DP_PORT_EN) { 703 if (dp_reg & DP_PORT_EN) {
642 intel_dp_link_down(intel_output, dp_priv->DP); 704 intel_dp_link_down(intel_output, dp_priv->DP);
643 if (IS_eDP(intel_output)) 705 if (IS_eDP(intel_output))
644 igdng_edp_backlight_off(dev); 706 ironlake_edp_backlight_off(dev);
645 } 707 }
646 } else { 708 } else {
647 if (!(dp_reg & DP_PORT_EN)) { 709 if (!(dp_reg & DP_PORT_EN)) {
648 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration); 710 intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
649 if (IS_eDP(intel_output)) 711 if (IS_eDP(intel_output))
650 igdng_edp_backlight_on(dev); 712 ironlake_edp_backlight_on(dev);
651 } 713 }
652 } 714 }
653 dp_priv->dpms_mode = mode; 715 dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
1010 struct drm_i915_private *dev_priv = dev->dev_private; 1072 struct drm_i915_private *dev_priv = dev->dev_private;
1011 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1073 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
1012 1074
1013 DRM_DEBUG("\n"); 1075 DRM_DEBUG_KMS("\n");
1014 1076
1015 if (IS_eDP(intel_output)) { 1077 if (IS_eDP(intel_output)) {
1016 DP &= ~DP_PLL_ENABLE; 1078 DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
1071} 1133}
1072 1134
1073static enum drm_connector_status 1135static enum drm_connector_status
1074igdng_dp_detect(struct drm_connector *connector) 1136ironlake_dp_detect(struct drm_connector *connector)
1075{ 1137{
1076 struct intel_output *intel_output = to_intel_output(connector); 1138 struct intel_output *intel_output = to_intel_output(connector);
1077 struct intel_dp_priv *dp_priv = intel_output->dev_priv; 1139 struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector)
1106 1168
1107 dp_priv->has_audio = false; 1169 dp_priv->has_audio = false;
1108 1170
1109 if (IS_IGDNG(dev)) 1171 if (IS_IRONLAKE(dev))
1110 return igdng_dp_detect(connector); 1172 return ironlake_dp_detect(connector);
1111 1173
1112 temp = I915_READ(PORT_HOTPLUG_EN); 1174 temp = I915_READ(PORT_HOTPLUG_EN);
1113 1175
@@ -1227,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output)
1227 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON) 1289 if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
1228 intel_dp_check_link_status(intel_output); 1290 intel_dp_check_link_status(intel_output);
1229} 1291}
1230 1292/*
1293 * Enumerate the child dev array parsed from VBT to check whether
1294 * the given DP is present.
1295 * If it is present, return 1.
 1296 * If it is not present, return 0.
1297 * If no child dev is parsed from VBT, it is assumed that the given
1298 * DP is present.
1299 */
1300static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
1301{
1302 struct drm_i915_private *dev_priv = dev->dev_private;
1303 struct child_device_config *p_child;
1304 int i, dp_port, ret;
1305
1306 if (!dev_priv->child_dev_num)
1307 return 1;
1308
1309 dp_port = 0;
1310 if (dp_reg == DP_B || dp_reg == PCH_DP_B)
1311 dp_port = PORT_IDPB;
1312 else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
1313 dp_port = PORT_IDPC;
1314 else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
1315 dp_port = PORT_IDPD;
1316
1317 ret = 0;
1318 for (i = 0; i < dev_priv->child_dev_num; i++) {
1319 p_child = dev_priv->child_dev + i;
1320 /*
1321 * If the device type is not DP, continue.
1322 */
1323 if (p_child->device_type != DEVICE_TYPE_DP &&
1324 p_child->device_type != DEVICE_TYPE_eDP)
1325 continue;
1326 /* Find the eDP port */
1327 if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
1328 ret = 1;
1329 break;
1330 }
1331 /* Find the DP port */
1332 if (p_child->dvo_port == dp_port) {
1333 ret = 1;
1334 break;
1335 }
1336 }
1337 return ret;
1338}
1231void 1339void
1232intel_dp_init(struct drm_device *dev, int output_reg) 1340intel_dp_init(struct drm_device *dev, int output_reg)
1233{ 1341{
@@ -1237,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1237 struct intel_dp_priv *dp_priv; 1345 struct intel_dp_priv *dp_priv;
1238 const char *name = NULL; 1346 const char *name = NULL;
1239 1347
1348 if (!dp_is_present_in_vbt(dev, output_reg)) {
1349 DRM_DEBUG_KMS("DP is not present. Ignore it\n");
1350 return;
1351 }
1240 intel_output = kcalloc(sizeof(struct intel_output) + 1352 intel_output = kcalloc(sizeof(struct intel_output) +
1241 sizeof(struct intel_dp_priv), 1, GFP_KERNEL); 1353 sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
1242 if (!intel_output) 1354 if (!intel_output)
@@ -1254,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
1254 else 1366 else
1255 intel_output->type = INTEL_OUTPUT_DISPLAYPORT; 1367 intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
1256 1368
1257 if (output_reg == DP_B) 1369 if (output_reg == DP_B || output_reg == PCH_DP_B)
1258 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT); 1370 intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
1259 else if (output_reg == DP_C) 1371 else if (output_reg == DP_C || output_reg == PCH_DP_C)
1260 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT); 1372 intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
1261 else if (output_reg == DP_D) 1373 else if (output_reg == DP_D || output_reg == PCH_DP_D)
1262 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); 1374 intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
1263 1375
1264 if (IS_eDP(intel_output)) { 1376 if (IS_eDP(intel_output)) {
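Editor's note: dp_is_present_in_vbt above, and the HDMI and LVDS versions added later in this patch, all share one shape: if the VBT parse produced no child devices, trust that the output exists; otherwise scan child_dev for a matching device type and DVO port. A condensed standalone sketch (the struct is trimmed to the two fields the check reads, and the DEVICE_TYPE_DP value is illustrative only):

#include <stdio.h>

struct child_device_config { int device_type; int dvo_port; };

#define DEVICE_TYPE_DP 0x68C6	/* illustrative value only */

static int output_is_present_in_vbt(const struct child_device_config *childs,
				    int num, int want_type, int want_port)
{
	if (!num)
		return 1;	/* no VBT child table: assume present */
	for (int i = 0; i < num; i++) {
		if (childs[i].device_type != want_type)
			continue;
		if (childs[i].dvo_port == want_port)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct child_device_config vbt[] = {
		{ DEVICE_TYPE_DP, 2 },
	};
	printf("port 2: %d, port 3: %d, empty table: %d\n",
	       output_is_present_in_vbt(vbt, 1, DEVICE_TYPE_DP, 2),
	       output_is_present_in_vbt(vbt, 1, DEVICE_TYPE_DP, 3),
	       output_is_present_in_vbt(NULL, 0, DEVICE_TYPE_DP, 3));
	return 0;
}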
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ef61fe9507e2..a51573da1ff6 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -110,6 +110,32 @@ struct intel_output {
110 int clone_mask; 110 int clone_mask;
111}; 111};
112 112
113struct intel_crtc;
114struct intel_overlay {
115 struct drm_device *dev;
116 struct intel_crtc *crtc;
117 struct drm_i915_gem_object *vid_bo;
118 struct drm_i915_gem_object *old_vid_bo;
119 int active;
120 int pfit_active;
121 u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
122 u32 color_key;
123 u32 brightness, contrast, saturation;
124 u32 old_xscale, old_yscale;
125 /* register access */
126 u32 flip_addr;
127 struct drm_i915_gem_object *reg_bo;
128 void *virt_addr;
129 /* flip handling */
130 uint32_t last_flip_req;
131 int hw_wedged;
132#define HW_WEDGED 1
133#define NEEDS_WAIT_FOR_FLIP 2
134#define RELEASE_OLD_VID 3
135#define SWITCH_OFF_STAGE_1 4
136#define SWITCH_OFF_STAGE_2 5
137};
138
113struct intel_crtc { 139struct intel_crtc {
114 struct drm_crtc base; 140 struct drm_crtc base;
115 enum pipe pipe; 141 enum pipe pipe;
@@ -121,6 +147,8 @@ struct intel_crtc {
121 bool busy; /* is scanout buffer being updated frequently? */ 147 bool busy; /* is scanout buffer being updated frequently? */
122 struct timer_list idle_timer; 148 struct timer_list idle_timer;
123 bool lowfreq_avail; 149 bool lowfreq_avail;
150 struct intel_overlay *overlay;
151 struct intel_unpin_work *unpin_work;
124}; 152};
125 153
126#define to_intel_crtc(x) container_of(x, struct intel_crtc, base) 154#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
134int intel_ddc_get_modes(struct intel_output *intel_output); 162int intel_ddc_get_modes(struct intel_output *intel_output);
135extern bool intel_ddc_probe(struct intel_output *intel_output); 163extern bool intel_ddc_probe(struct intel_output *intel_output);
136void intel_i2c_quirk_set(struct drm_device *dev, bool enable); 164void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
165void intel_i2c_reset_gmbus(struct drm_device *dev);
166
137extern void intel_crt_init(struct drm_device *dev); 167extern void intel_crt_init(struct drm_device *dev);
138extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg); 168extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
139extern bool intel_sdvo_init(struct drm_device *dev, int output_device); 169extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
148extern void intel_edp_link_config (struct intel_output *, int *, int *); 178extern void intel_edp_link_config (struct intel_output *, int *, int *);
149 179
150 180
181extern int intel_panel_fitter_pipe (struct drm_device *dev);
151extern void intel_crtc_load_lut(struct drm_crtc *crtc); 182extern void intel_crtc_load_lut(struct drm_crtc *crtc);
152extern void intel_encoder_prepare (struct drm_encoder *encoder); 183extern void intel_encoder_prepare (struct drm_encoder *encoder);
153extern void intel_encoder_commit (struct drm_encoder *encoder); 184extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
177 u16 blue, int regno); 208 u16 blue, int regno);
178extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 209extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
179 u16 *blue, int regno); 210 u16 *blue, int regno);
211extern void intel_init_clock_gating(struct drm_device *dev);
180 212
181extern int intel_framebuffer_create(struct drm_device *dev, 213extern int intel_framebuffer_create(struct drm_device *dev,
182 struct drm_mode_fb_cmd *mode_cmd, 214 struct drm_mode_fb_cmd *mode_cmd,
183 struct drm_framebuffer **fb, 215 struct drm_framebuffer **fb,
184 struct drm_gem_object *obj); 216 struct drm_gem_object *obj);
185 217
218extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
219extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
220
221extern void intel_setup_overlay(struct drm_device *dev);
222extern void intel_cleanup_overlay(struct drm_device *dev);
223extern int intel_overlay_switch_off(struct intel_overlay *overlay);
224extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
225 int interruptible);
226extern int intel_overlay_put_image(struct drm_device *dev, void *data,
227 struct drm_file *file_priv);
228extern int intel_overlay_attrs(struct drm_device *dev, void *data,
229 struct drm_file *file_priv);
186#endif /* __INTEL_DRV_H__ */ 230#endif /* __INTEL_DRV_H__ */
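Editor's note: the hw_wedged values added above record where an interrupted overlay operation stopped, so intel_overlay_recover_from_interrupt (added later in this patch) can pick up again: NEEDS_WAIT_FOR_FLIP while the initial on-flip is pending, RELEASE_OLD_VID while an old frame still needs unpinning, SWITCH_OFF_STAGE_1/2 across the two-step disable, and HW_WEDGED once only a reset will help. A standalone restatement; the names and values come from the patch, the action strings paraphrase the recovery path:

#include <stdio.h>

enum overlay_wedge_state {		/* names/values from the patch */
	HW_WEDGED = 1,			/* hardware gave up; only reset helps */
	NEEDS_WAIT_FOR_FLIP = 2,	/* on-flip emitted, not yet completed */
	RELEASE_OLD_VID = 3,		/* old frame still pinned */
	SWITCH_OFF_STAGE_1 = 4,		/* idle-wait emitted, off-flip pending */
	SWITCH_OFF_STAGE_2 = 5,		/* off-flip emitted, tail cleanup left */
};

static const char *recover_action(enum overlay_wedge_state s)
{
	switch (s) {
	case HW_WEDGED:          return "fail with -EIO";
	case RELEASE_OLD_VID:    return "unpin and drop the old frame";
	case SWITCH_OFF_STAGE_1: return "emit off-flip, then fall through";
	case SWITCH_OFF_STAGE_2: return "run the off tail (unpin, deactivate)";
	default:                 return "just wait for the pending flip";
	}
}

int main(void)
{
	for (int s = 1; s <= 5; s++)
		printf("%d: %s\n", s, recover_action(s));
	return 0;
}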
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 40fcf6fdef38..371d753e362b 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
230 par->intel_fb = intel_fb; 230 par->intel_fb = intel_fb;
231 231
232 /* To allow resizing without swapping buffers */ 232
233 DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width, 233 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
234 intel_fb->base.height, obj_priv->gtt_offset, fbo); 234 intel_fb->base.width, intel_fb->base.height,
235 obj_priv->gtt_offset, fbo);
235 236
236 mutex_unlock(&dev->struct_mutex); 237 mutex_unlock(&dev->struct_mutex);
237 return 0; 238 return 0;
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
249{ 250{
250 int ret; 251 int ret;
251 252
252 DRM_DEBUG("\n"); 253 DRM_DEBUG_KMS("\n");
253 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); 254 ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
254 return ret; 255 return ret;
255} 256}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index c33451aec1bd..f04dbbe7d400 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but 82 /* HW workaround, need to toggle enable bit off and on for 12bpc, but
83 * we do this anyway which shows more stable in testing. 83 * we do this anyway which shows more stable in testing.
84 */ 84 */
85 if (IS_IGDNG(dev)) { 85 if (IS_IRONLAKE(dev)) {
86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE); 86 I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
87 POSTING_READ(hdmi_priv->sdvox_reg); 87 POSTING_READ(hdmi_priv->sdvox_reg);
88 } 88 }
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
99 /* HW workaround, need to write this twice for issue that may result 99 /* HW workaround, need to write this twice for issue that may result
100 * in first write getting masked. 100 * in first write getting masked.
101 */ 101 */
102 if (IS_IGDNG(dev)) { 102 if (IS_IRONLAKE(dev)) {
103 I915_WRITE(hdmi_priv->sdvox_reg, temp); 103 I915_WRITE(hdmi_priv->sdvox_reg, temp);
104 POSTING_READ(hdmi_priv->sdvox_reg); 104 POSTING_READ(hdmi_priv->sdvox_reg);
105 } 105 }
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
225 .destroy = intel_hdmi_enc_destroy, 225 .destroy = intel_hdmi_enc_destroy,
226}; 226};
227 227
228 228/*
229 * Enumerate the child dev array parsed from VBT to check whether
230 * the given HDMI is present.
231 * If it is present, return 1.
232 * If it is not present, return false.
233 * If no child dev is parsed from VBT, it assumes that the given
234 * HDMI is present.
235 */
236static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
237{
238 struct drm_i915_private *dev_priv = dev->dev_private;
239 struct child_device_config *p_child;
240 int i, hdmi_port, ret;
241
242 if (!dev_priv->child_dev_num)
243 return 1;
244
245 if (hdmi_reg == SDVOB)
246 hdmi_port = DVO_B;
247 else if (hdmi_reg == SDVOC)
248 hdmi_port = DVO_C;
249 else if (hdmi_reg == HDMIB)
250 hdmi_port = DVO_B;
251 else if (hdmi_reg == HDMIC)
252 hdmi_port = DVO_C;
253 else if (hdmi_reg == HDMID)
254 hdmi_port = DVO_D;
255 else
256 return 0;
257
258 ret = 0;
259 for (i = 0; i < dev_priv->child_dev_num; i++) {
260 p_child = dev_priv->child_dev + i;
261 /*
262 * If the device type is not HDMI, continue.
263 */
264 if (p_child->device_type != DEVICE_TYPE_HDMI)
265 continue;
266 /* Find the HDMI port */
267 if (p_child->dvo_port == hdmi_port) {
268 ret = 1;
269 break;
270 }
271 }
272 return ret;
273}
229void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) 274void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
230{ 275{
231 struct drm_i915_private *dev_priv = dev->dev_private; 276 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
233 struct intel_output *intel_output; 278 struct intel_output *intel_output;
234 struct intel_hdmi_priv *hdmi_priv; 279 struct intel_hdmi_priv *hdmi_priv;
235 280
281 if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
 282 DRM_DEBUG_KMS("HDMI is not present. Ignore it\n");
283 return;
284 }
236 intel_output = kcalloc(sizeof(struct intel_output) + 285 intel_output = kcalloc(sizeof(struct intel_output) +
237 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL); 286 sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
238 if (!intel_output) 287 if (!intel_output)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c7eab724c418..8673c735b8ab 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 40
41 /* When using bit bashing for I2C, this bit needs to be set to 1 */ 41 /* When using bit bashing for I2C, this bit needs to be set to 1 */
42 if (!IS_IGD(dev)) 42 if (!IS_PINEVIEW(dev))
43 return; 43 return;
44 if (enable) 44 if (enable)
45 I915_WRITE(DSPCLK_GATE_D, 45 I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high)
118 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */ 118 udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
119} 119}
120 120
121/* Clears the GMBUS setup. Our driver doesn't make use of the GMBUS I2C
122 * engine, but if the BIOS leaves it enabled, then that can break our use
123 * of the bit-banging I2C interfaces. This is notably the case with the
124 * Mac Mini in EFI mode.
125 */
126void
127intel_i2c_reset_gmbus(struct drm_device *dev)
128{
129 struct drm_i915_private *dev_priv = dev->dev_private;
130
131 if (IS_IRONLAKE(dev)) {
132 I915_WRITE(PCH_GMBUS0, 0);
133 } else {
134 I915_WRITE(GMBUS0, 0);
135 }
136}
137
121/** 138/**
122 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg 139 * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
123 * @dev: DRM device 140 * @dev: DRM device
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
168 if(i2c_bit_add_bus(&chan->adapter)) 185 if(i2c_bit_add_bus(&chan->adapter))
169 goto out_free; 186 goto out_free;
170 187
188 intel_i2c_reset_gmbus(dev);
189
171 /* JJJ: raise SCL and SDA? */ 190 /* JJJ: raise SCL and SDA? */
172 intel_i2c_quirk_set(dev, true); 191 intel_i2c_quirk_set(dev, true);
173 set_data(chan, 1); 192 set_data(chan, 1);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index eb365021bb5a..3118ce274e67 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
56 struct drm_i915_private *dev_priv = dev->dev_private; 56 struct drm_i915_private *dev_priv = dev->dev_private;
57 u32 blc_pwm_ctl, reg; 57 u32 blc_pwm_ctl, reg;
58 58
59 if (IS_IGDNG(dev)) 59 if (IS_IRONLAKE(dev))
60 reg = BLC_PWM_CPU_CTL; 60 reg = BLC_PWM_CPU_CTL;
61 else 61 else
62 reg = BLC_PWM_CTL; 62 reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
75 u32 reg; 75 u32 reg;
76 76
77 if (IS_IGDNG(dev)) 77 if (IS_IRONLAKE(dev))
78 reg = BLC_PWM_PCH_CTL2; 78 reg = BLC_PWM_PCH_CTL2;
79 else 79 else
80 reg = BLC_PWM_CTL; 80 reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
91 struct drm_i915_private *dev_priv = dev->dev_private; 91 struct drm_i915_private *dev_priv = dev->dev_private;
92 u32 pp_status, ctl_reg, status_reg; 92 u32 pp_status, ctl_reg, status_reg;
93 93
94 if (IS_IGDNG(dev)) { 94 if (IS_IRONLAKE(dev)) {
95 ctl_reg = PCH_PP_CONTROL; 95 ctl_reg = PCH_PP_CONTROL;
96 status_reg = PCH_PP_STATUS; 96 status_reg = PCH_PP_STATUS;
97 } else { 97 } else {
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 137 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
138 u32 pwm_ctl_reg; 138 u32 pwm_ctl_reg;
139 139
140 if (IS_IGDNG(dev)) { 140 if (IS_IRONLAKE(dev)) {
141 pp_on_reg = PCH_PP_ON_DELAYS; 141 pp_on_reg = PCH_PP_ON_DELAYS;
142 pp_off_reg = PCH_PP_OFF_DELAYS; 142 pp_off_reg = PCH_PP_OFF_DELAYS;
143 pp_ctl_reg = PCH_PP_CONTROL; 143 pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg; 174 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
175 u32 pwm_ctl_reg; 175 u32 pwm_ctl_reg;
176 176
177 if (IS_IGDNG(dev)) { 177 if (IS_IRONLAKE(dev)) {
178 pp_on_reg = PCH_PP_ON_DELAYS; 178 pp_on_reg = PCH_PP_ON_DELAYS;
179 pp_off_reg = PCH_PP_OFF_DELAYS; 179 pp_off_reg = PCH_PP_OFF_DELAYS;
180 pp_ctl_reg = PCH_PP_CONTROL; 180 pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
297 } 297 }
298 298
299 /* full screen scale for now */ 299 /* full screen scale for now */
300 if (IS_IGDNG(dev)) 300 if (IS_IRONLAKE(dev))
301 goto out; 301 goto out;
302 302
303 /* 965+ wants fuzzy fitting */ 303 /* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
327 * to register description and PRM. 327 * to register description and PRM.
328 * Change the value here to see the borders for debugging 328 * Change the value here to see the borders for debugging
329 */ 329 */
330 if (!IS_IGDNG(dev)) { 330 if (!IS_IRONLAKE(dev)) {
331 I915_WRITE(BCLRPAT_A, 0); 331 I915_WRITE(BCLRPAT_A, 0);
332 I915_WRITE(BCLRPAT_B, 0); 332 I915_WRITE(BCLRPAT_B, 0);
333 } 333 }
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
548 struct drm_i915_private *dev_priv = dev->dev_private; 548 struct drm_i915_private *dev_priv = dev->dev_private;
549 u32 reg; 549 u32 reg;
550 550
551 if (IS_IGDNG(dev)) 551 if (IS_IRONLAKE(dev))
552 reg = BLC_PWM_CPU_CTL; 552 reg = BLC_PWM_CPU_CTL;
553 else 553 else
554 reg = BLC_PWM_CTL; 554 reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
587 * settings. 587 * settings.
588 */ 588 */
589 589
590 if (IS_IGDNG(dev)) 590 if (IS_IRONLAKE(dev))
591 return; 591 return;
592 592
593 /* 593 /*
@@ -914,6 +914,101 @@ static int intel_lid_present(void)
914#endif 914#endif
915 915
916/** 916/**
917 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
918 * @dev: drm device
919 * @connector: LVDS connector
920 *
921 * Find the reduced downclock for LVDS in EDID.
922 */
923static void intel_find_lvds_downclock(struct drm_device *dev,
924 struct drm_connector *connector)
925{
926 struct drm_i915_private *dev_priv = dev->dev_private;
927 struct drm_display_mode *scan, *panel_fixed_mode;
928 int temp_downclock;
929
930 panel_fixed_mode = dev_priv->panel_fixed_mode;
931 temp_downclock = panel_fixed_mode->clock;
932
933 mutex_lock(&dev->mode_config.mutex);
934 list_for_each_entry(scan, &connector->probed_modes, head) {
935 /*
 936 * If a probed mode has the same timings as the fixed panel
 937 * mode but a lower refresh rate, a reduced downclock has been
 938 * found for the LVDS. In that case we can program different
 939 * FPx0/1 values to dynamically select between the low and
 940 * high frequencies.
941 */
942 if (scan->hdisplay == panel_fixed_mode->hdisplay &&
943 scan->hsync_start == panel_fixed_mode->hsync_start &&
944 scan->hsync_end == panel_fixed_mode->hsync_end &&
945 scan->htotal == panel_fixed_mode->htotal &&
946 scan->vdisplay == panel_fixed_mode->vdisplay &&
947 scan->vsync_start == panel_fixed_mode->vsync_start &&
948 scan->vsync_end == panel_fixed_mode->vsync_end &&
949 scan->vtotal == panel_fixed_mode->vtotal) {
950 if (scan->clock < temp_downclock) {
951 /*
 952 * A downclock was already found, but we keep
 953 * looking for an even lower one.
954 */
955 temp_downclock = scan->clock;
956 }
957 }
958 }
959 mutex_unlock(&dev->mode_config.mutex);
960 if (temp_downclock < panel_fixed_mode->clock) {
961 /* We found the downclock for LVDS. */
962 dev_priv->lvds_downclock_avail = 1;
963 dev_priv->lvds_downclock = temp_downclock;
964 DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
965 "Normal clock %dKhz, downclock %dKhz\n",
966 panel_fixed_mode->clock, temp_downclock);
967 }
968 return;
969}
970
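Editor's note: intel_find_lvds_downclock above keys on a strict timing match: a probed mode is a downclock candidate only if every horizontal and vertical timing field equals the fixed panel mode while its clock is lower, and the lowest such clock wins. A standalone sketch of that scan (the mode struct is trimmed; the real check also compares the hsync/vsync start and end fields, and the sample timings are made up):

#include <stdio.h>

struct mode { int hdisplay, htotal, vdisplay, vtotal, clock; };

static int same_timings(const struct mode *a, const struct mode *b)
{
	return a->hdisplay == b->hdisplay && a->htotal == b->htotal &&
	       a->vdisplay == b->vdisplay && a->vtotal == b->vtotal;
}

static int find_downclock(const struct mode *fixed,
			  const struct mode *probed, int n)
{
	int down = fixed->clock;
	for (int i = 0; i < n; i++)
		if (same_timings(fixed, &probed[i]) && probed[i].clock < down)
			down = probed[i].clock;	/* keep the lowest match */
	return down;	/* == fixed->clock means no downclock was found */
}

int main(void)
{
	struct mode fixed = { 1280, 1408, 800, 823, 68900 };
	struct mode probed[] = {
		{ 1280, 1408, 800, 823, 68900 },	/* the fixed mode itself */
		{ 1280, 1408, 800, 823, 53680 },	/* same timings, lower clock */
		{ 1024, 1184, 768, 790, 65000 },	/* different panel timing */
	};
	printf("downclock: %dkHz\n", find_downclock(&fixed, probed, 3));
	return 0;
}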
971/*
972 * Enumerate the child dev array parsed from VBT to check whether
973 * the LVDS is present.
974 * If it is present, return 1.
 975 * If it is not present, return 0.
976 * If no child dev is parsed from VBT, it assumes that the LVDS is present.
977 * Note: The addin_offset should also be checked for LVDS panel.
 978 * Only when it is non-zero is the panel assumed to be present.
979 */
980static int lvds_is_present_in_vbt(struct drm_device *dev)
981{
982 struct drm_i915_private *dev_priv = dev->dev_private;
983 struct child_device_config *p_child;
984 int i, ret;
985
986 if (!dev_priv->child_dev_num)
987 return 1;
988
989 ret = 0;
990 for (i = 0; i < dev_priv->child_dev_num; i++) {
991 p_child = dev_priv->child_dev + i;
992 /*
993 * If the device type is not LFP, continue.
994 * If the device type is 0x22, it is also regarded as LFP.
995 */
996 if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
997 p_child->device_type != DEVICE_TYPE_LFP)
998 continue;
999
1000 /* The addin_offset should be checked. Only when it is
1001 * non-zero, it is regarded as present.
1002 */
1003 if (p_child->addin_offset) {
1004 ret = 1;
1005 break;
1006 }
1007 }
1008 return ret;
1009}
1010
1011/**
917 * intel_lvds_init - setup LVDS connectors on this device 1012 * intel_lvds_init - setup LVDS connectors on this device
918 * @dev: drm device 1013 * @dev: drm device
919 * 1014 *
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev)
936 if (dmi_check_system(intel_no_lvds)) 1031 if (dmi_check_system(intel_no_lvds))
937 return; 1032 return;
938 1033
939 /* Assume that any device without an ACPI LID device also doesn't 1034 /*
940 * have an integrated LVDS. We would be better off parsing the BIOS 1035 * Assume LVDS is present if there's an ACPI lid device or if the
941 * to get a reliable indicator, but that code isn't written yet. 1036 * device is present in the VBT.
942 *
943 * In the case of all-in-one desktops using LVDS that we've seen,
944 * they're using SDVO LVDS.
945 */ 1037 */
946 if (!intel_lid_present()) 1038 if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
1039 DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
947 return; 1040 return;
1041 }
948 1042
949 if (IS_IGDNG(dev)) { 1043 if (IS_IRONLAKE(dev)) {
950 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 1044 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
951 return; 1045 return;
952 if (dev_priv->edp_support) { 1046 if (dev_priv->edp_support) {
953 DRM_DEBUG("disable LVDS for eDP support\n"); 1047 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
954 return; 1048 return;
955 } 1049 }
956 gpio = PCH_GPIOC; 1050 gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev)
1023 dev_priv->panel_fixed_mode = 1117 dev_priv->panel_fixed_mode =
1024 drm_mode_duplicate(dev, scan); 1118 drm_mode_duplicate(dev, scan);
1025 mutex_unlock(&dev->mode_config.mutex); 1119 mutex_unlock(&dev->mode_config.mutex);
1120 intel_find_lvds_downclock(dev, connector);
1026 goto out; 1121 goto out;
1027 } 1122 }
1028 mutex_unlock(&dev->mode_config.mutex); 1123 mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev)
1047 * correct mode. 1142 * correct mode.
1048 */ 1143 */
1049 1144
1050 /* IGDNG: FIXME if still fail, not try pipe mode now */ 1145 /* Ironlake: FIXME if still fail, not try pipe mode now */
1051 if (IS_IGDNG(dev)) 1146 if (IS_IRONLAKE(dev))
1052 goto failed; 1147 goto failed;
1053 1148
1054 lvds = I915_READ(LVDS); 1149 lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev)
1069 goto failed; 1164 goto failed;
1070 1165
1071out: 1166out:
1072 if (IS_IGDNG(dev)) { 1167 if (IS_IRONLAKE(dev)) {
1073 u32 pwm; 1168 u32 pwm;
1074 /* make sure PWM is enabled */ 1169 /* make sure PWM is enabled */
1075 pwm = I915_READ(BLC_PWM_CPU_CTL2); 1170 pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@ out:
1082 } 1177 }
1083 dev_priv->lid_notifier.notifier_call = intel_lid_notify; 1178 dev_priv->lid_notifier.notifier_call = intel_lid_notify;
1084 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) { 1179 if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
1085 DRM_DEBUG("lid notifier registration failed\n"); 1180 DRM_DEBUG_KMS("lid notifier registration failed\n");
1086 dev_priv->lid_notifier.notifier_call = NULL; 1181 dev_priv->lid_notifier.notifier_call = NULL;
1087 } 1182 }
1088 drm_sysfs_connector_add(connector); 1183 drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@ failed:
1093 if (intel_output->ddc_bus) 1188 if (intel_output->ddc_bus)
1094 intel_i2c_destroy(intel_output->ddc_bus); 1189 intel_i2c_destroy(intel_output->ddc_bus);
1095 drm_connector_cleanup(connector); 1190 drm_connector_cleanup(connector);
1191 drm_encoder_cleanup(encoder);
1096 kfree(intel_output); 1192 kfree(intel_output);
1097} 1193}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
new file mode 100644
index 000000000000..2639591c72e9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -0,0 +1,1416 @@
1/*
2 * Copyright © 2009
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel@ffwll.ch>
25 *
26 * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
27 */
28#include "drmP.h"
29#include "drm.h"
30#include "i915_drm.h"
31#include "i915_drv.h"
32#include "i915_reg.h"
33#include "intel_drv.h"
34
35/* Limits for overlay size. According to intel doc, the real limits are:
 36 * Y width: 4095, Y height: 2047, UV width (planar): 2047,
 37 * UV height (planar): 1023. But Xorg assumes 2048 for both height and
 38 * width; use the minimum of both. */
39#define IMAGE_MAX_WIDTH 2048
40#define IMAGE_MAX_HEIGHT 2046 /* 2 * 1023 */
41/* on 830 and 845 these large limits result in the card hanging */
42#define IMAGE_MAX_WIDTH_LEGACY 1024
43#define IMAGE_MAX_HEIGHT_LEGACY 1088
44
45/* overlay register definitions */
46/* OCMD register */
47#define OCMD_TILED_SURFACE (0x1<<19)
48#define OCMD_MIRROR_MASK (0x3<<17)
49#define OCMD_MIRROR_MODE (0x3<<17)
50#define OCMD_MIRROR_HORIZONTAL (0x1<<17)
51#define OCMD_MIRROR_VERTICAL (0x2<<17)
52#define OCMD_MIRROR_BOTH (0x3<<17)
53#define OCMD_BYTEORDER_MASK (0x3<<14) /* zero for YUYV or FOURCC YUY2 */
54#define OCMD_UV_SWAP (0x1<<14) /* YVYU */
55#define OCMD_Y_SWAP (0x2<<14) /* UYVY or FOURCC UYVY */
56#define OCMD_Y_AND_UV_SWAP (0x3<<14) /* VYUY */
57#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
58#define OCMD_RGB_888 (0x1<<10) /* not in i965 Intel docs */
59#define OCMD_RGB_555 (0x2<<10) /* not in i965 Intel docs */
60#define OCMD_RGB_565 (0x3<<10) /* not in i965 Intel docs */
61#define OCMD_YUV_422_PACKED (0x8<<10)
62#define OCMD_YUV_411_PACKED (0x9<<10) /* not in i965 Intel docs */
63#define OCMD_YUV_420_PLANAR (0xc<<10)
64#define OCMD_YUV_422_PLANAR (0xd<<10)
65#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
66#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
67#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
 68#define OCMD_BUF_TYPE_MASK (0x1<<5)
69#define OCMD_BUF_TYPE_FRAME (0x0<<5)
70#define OCMD_BUF_TYPE_FIELD (0x1<<5)
71#define OCMD_TEST_MODE (0x1<<4)
72#define OCMD_BUFFER_SELECT (0x3<<2)
73#define OCMD_BUFFER0 (0x0<<2)
74#define OCMD_BUFFER1 (0x1<<2)
75#define OCMD_FIELD_SELECT (0x1<<2)
76#define OCMD_FIELD0 (0x0<<1)
77#define OCMD_FIELD1 (0x1<<1)
78#define OCMD_ENABLE (0x1<<0)
79
80/* OCONFIG register */
81#define OCONF_PIPE_MASK (0x1<<18)
82#define OCONF_PIPE_A (0x0<<18)
83#define OCONF_PIPE_B (0x1<<18)
84#define OCONF_GAMMA2_ENABLE (0x1<<16)
85#define OCONF_CSC_MODE_BT601 (0x0<<5)
86#define OCONF_CSC_MODE_BT709 (0x1<<5)
87#define OCONF_CSC_BYPASS (0x1<<4)
88#define OCONF_CC_OUT_8BIT (0x1<<3)
89#define OCONF_TEST_MODE (0x1<<2)
90#define OCONF_THREE_LINE_BUFFER (0x1<<0)
91#define OCONF_TWO_LINE_BUFFER (0x0<<0)
92
93/* DCLRKM (dst-key) register */
94#define DST_KEY_ENABLE (0x1<<31)
95#define CLK_RGB24_MASK 0x0
96#define CLK_RGB16_MASK 0x070307
97#define CLK_RGB15_MASK 0x070707
98#define CLK_RGB8I_MASK 0xffffff
99
100#define RGB16_TO_COLORKEY(c) \
101 (((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
102#define RGB15_TO_COLORKEY(c) \
103 (((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
104
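Editor's note: RGB16_TO_COLORKEY and RGB15_TO_COLORKEY above shift each packed color field up into an 8:8:8 key value, and the CLK_*_MASK constants tell the hardware which low bits of that key to ignore. A worked check of the 5:6:5 expansion, with the macro copied verbatim from the hunk above:

#include <stdint.h>
#include <stdio.h>

#define RGB16_TO_COLORKEY(c) \
	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))

int main(void)
{
	/* 5:6:5 white: every field saturated */
	uint16_t white = 0xFFFF;
	/* red 0xF8<<16, green 0xFC<<8, blue 0xF8 -> 0xF8FCF8, low bits zero;
	 * CLK_RGB16_MASK (0x070307) covers exactly those missing low bits. */
	printf("0x%04x -> 0x%06x\n", white, (unsigned)RGB16_TO_COLORKEY(white));
	return 0;
}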
105/* overlay flip addr flag */
106#define OFC_UPDATE 0x1
107
108/* polyphase filter coefficients */
109#define N_HORIZ_Y_TAPS 5
110#define N_VERT_Y_TAPS 3
111#define N_HORIZ_UV_TAPS 3
112#define N_VERT_UV_TAPS 3
113#define N_PHASES 17
114#define MAX_TAPS 5
115
116/* memory-buffered overlay registers */
117struct overlay_registers {
118 u32 OBUF_0Y;
119 u32 OBUF_1Y;
120 u32 OBUF_0U;
121 u32 OBUF_0V;
122 u32 OBUF_1U;
123 u32 OBUF_1V;
124 u32 OSTRIDE;
125 u32 YRGB_VPH;
126 u32 UV_VPH;
127 u32 HORZ_PH;
128 u32 INIT_PHS;
129 u32 DWINPOS;
130 u32 DWINSZ;
131 u32 SWIDTH;
132 u32 SWIDTHSW;
133 u32 SHEIGHT;
134 u32 YRGBSCALE;
135 u32 UVSCALE;
136 u32 OCLRC0;
137 u32 OCLRC1;
138 u32 DCLRKV;
139 u32 DCLRKM;
140 u32 SCLRKVH;
141 u32 SCLRKVL;
142 u32 SCLRKEN;
143 u32 OCONFIG;
144 u32 OCMD;
145 u32 RESERVED1; /* 0x6C */
146 u32 OSTART_0Y;
147 u32 OSTART_1Y;
148 u32 OSTART_0U;
149 u32 OSTART_0V;
150 u32 OSTART_1U;
151 u32 OSTART_1V;
152 u32 OTILEOFF_0Y;
153 u32 OTILEOFF_1Y;
154 u32 OTILEOFF_0U;
155 u32 OTILEOFF_0V;
156 u32 OTILEOFF_1U;
157 u32 OTILEOFF_1V;
158 u32 FASTHSCALE; /* 0xA0 */
159 u32 UVSCALEV; /* 0xA4 */
160 u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
161 u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
162 u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
163 u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
164 u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
165 u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
166 u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
167 u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
168 u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
169};
170
171/* overlay flip addr flag */
172#define OFC_UPDATE 0x1
173
174#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
175#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
176
177
178static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
179{
180 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
181 struct overlay_registers *regs;
182
183 /* no recursive mappings */
184 BUG_ON(overlay->virt_addr);
185
186 if (OVERLAY_NONPHYSICAL(overlay->dev)) {
187 regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
188 overlay->reg_bo->gtt_offset);
189
190 if (!regs) {
191 DRM_ERROR("failed to map overlay regs in GTT\n");
192 return NULL;
193 }
194 } else
195 regs = overlay->reg_bo->phys_obj->handle->vaddr;
196
197 return overlay->virt_addr = regs;
198}
199
200static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
201{
202 struct drm_device *dev = overlay->dev;
203 drm_i915_private_t *dev_priv = dev->dev_private;
204
205 if (OVERLAY_NONPHYSICAL(overlay->dev))
206 io_mapping_unmap_atomic(overlay->virt_addr);
207
208 overlay->virt_addr = NULL;
209
 210 I915_READ(OVADD); /* flush WC caches */
211
212 return;
213}
214
215/* overlay needs to be disabled in OCMD reg */
216static int intel_overlay_on(struct intel_overlay *overlay)
217{
218 struct drm_device *dev = overlay->dev;
219 drm_i915_private_t *dev_priv = dev->dev_private;
220 int ret;
221 RING_LOCALS;
222
223 BUG_ON(overlay->active);
224
225 overlay->active = 1;
226 overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
227
228 BEGIN_LP_RING(6);
229 OUT_RING(MI_FLUSH);
230 OUT_RING(MI_NOOP);
231 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
232 OUT_RING(overlay->flip_addr | OFC_UPDATE);
233 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
234 OUT_RING(MI_NOOP);
235 ADVANCE_LP_RING();
236
237 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
238 if (overlay->last_flip_req == 0)
239 return -ENOMEM;
240
241 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
242 if (ret != 0)
243 return ret;
244
245 overlay->hw_wedged = 0;
246 overlay->last_flip_req = 0;
247 return 0;
248}
249
250/* overlay needs to be enabled in OCMD reg */
251static void intel_overlay_continue(struct intel_overlay *overlay,
252 bool load_polyphase_filter)
253{
254 struct drm_device *dev = overlay->dev;
255 drm_i915_private_t *dev_priv = dev->dev_private;
256 u32 flip_addr = overlay->flip_addr;
257 u32 tmp;
258 RING_LOCALS;
259
260 BUG_ON(!overlay->active);
261
262 if (load_polyphase_filter)
263 flip_addr |= OFC_UPDATE;
264
265 /* check for underruns */
266 tmp = I915_READ(DOVSTA);
267 if (tmp & (1 << 17))
268 DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
269
270 BEGIN_LP_RING(4);
271 OUT_RING(MI_FLUSH);
272 OUT_RING(MI_NOOP);
273 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
274 OUT_RING(flip_addr);
275 ADVANCE_LP_RING();
276
277 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
278}
279
280static int intel_overlay_wait_flip(struct intel_overlay *overlay)
281{
282 struct drm_device *dev = overlay->dev;
283 drm_i915_private_t *dev_priv = dev->dev_private;
284 int ret;
285 u32 tmp;
286 RING_LOCALS;
287
288 if (overlay->last_flip_req != 0) {
289 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
290 if (ret == 0) {
291 overlay->last_flip_req = 0;
292
293 tmp = I915_READ(ISR);
294
295 if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
296 return 0;
297 }
298 }
299
300 /* synchronous slowpath */
301 overlay->hw_wedged = RELEASE_OLD_VID;
302
303 BEGIN_LP_RING(2);
304 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
305 OUT_RING(MI_NOOP);
306 ADVANCE_LP_RING();
307
308 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
309 if (overlay->last_flip_req == 0)
310 return -ENOMEM;
311
312 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
313 if (ret != 0)
314 return ret;
315
316 overlay->hw_wedged = 0;
317 overlay->last_flip_req = 0;
318 return 0;
319}
320
321/* overlay needs to be disabled in OCMD reg */
322static int intel_overlay_off(struct intel_overlay *overlay)
323{
324 u32 flip_addr = overlay->flip_addr;
325 struct drm_device *dev = overlay->dev;
326 drm_i915_private_t *dev_priv = dev->dev_private;
327 int ret;
328 RING_LOCALS;
329
330 BUG_ON(!overlay->active);
331
332 /* According to intel docs the overlay hw may hang (when switching
333 * off) without loading the filter coeffs. It is however unclear whether
334 * this applies to the disabling of the overlay or to the switching off
335 * of the hw. Do it in both cases */
336 flip_addr |= OFC_UPDATE;
337
338 /* wait for overlay to go idle */
339 overlay->hw_wedged = SWITCH_OFF_STAGE_1;
340
341 BEGIN_LP_RING(6);
342 OUT_RING(MI_FLUSH);
343 OUT_RING(MI_NOOP);
344 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
345 OUT_RING(flip_addr);
346 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
347 OUT_RING(MI_NOOP);
348 ADVANCE_LP_RING();
349
350 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
351 if (overlay->last_flip_req == 0)
352 return -ENOMEM;
353
354 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
355 if (ret != 0)
356 return ret;
357
358 /* turn overlay off */
359 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
360
361 BEGIN_LP_RING(6);
362 OUT_RING(MI_FLUSH);
363 OUT_RING(MI_NOOP);
364 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
365 OUT_RING(flip_addr);
366 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
367 OUT_RING(MI_NOOP);
368 ADVANCE_LP_RING();
369
370 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
371 if (overlay->last_flip_req == 0)
372 return -ENOMEM;
373
374 ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
375 if (ret != 0)
376 return ret;
377
378 overlay->hw_wedged = 0;
379 overlay->last_flip_req = 0;
380 return ret;
381}
382
383static void intel_overlay_off_tail(struct intel_overlay *overlay)
384{
385 struct drm_gem_object *obj;
386
387 /* never have the overlay hw on without showing a frame */
388 BUG_ON(!overlay->vid_bo);
389 obj = overlay->vid_bo->obj;
390
391 i915_gem_object_unpin(obj);
392 drm_gem_object_unreference(obj);
393 overlay->vid_bo = NULL;
394
395 overlay->crtc->overlay = NULL;
396 overlay->crtc = NULL;
397 overlay->active = 0;
398}
399
400/* recover from an interruption due to a signal
 401 * We have to be careful not to repeat work forever and to make forward progress. */
402int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
403 int interruptible)
404{
405 struct drm_device *dev = overlay->dev;
406 drm_i915_private_t *dev_priv = dev->dev_private;
407 struct drm_gem_object *obj;
408 u32 flip_addr;
409 int ret;
410 RING_LOCALS;
411
412 if (overlay->hw_wedged == HW_WEDGED)
413 return -EIO;
414
415 if (overlay->last_flip_req == 0) {
416 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
417 if (overlay->last_flip_req == 0)
418 return -ENOMEM;
419 }
420
421 ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
422 if (ret != 0)
423 return ret;
424
425 switch (overlay->hw_wedged) {
426 case RELEASE_OLD_VID:
427 obj = overlay->old_vid_bo->obj;
428 i915_gem_object_unpin(obj);
429 drm_gem_object_unreference(obj);
430 overlay->old_vid_bo = NULL;
431 break;
432 case SWITCH_OFF_STAGE_1:
433 flip_addr = overlay->flip_addr;
434 flip_addr |= OFC_UPDATE;
435
436 overlay->hw_wedged = SWITCH_OFF_STAGE_2;
437
438 BEGIN_LP_RING(6);
439 OUT_RING(MI_FLUSH);
440 OUT_RING(MI_NOOP);
441 OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
442 OUT_RING(flip_addr);
443 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
444 OUT_RING(MI_NOOP);
445 ADVANCE_LP_RING();
446
447 overlay->last_flip_req = i915_add_request(dev, NULL, 0);
448 if (overlay->last_flip_req == 0)
449 return -ENOMEM;
450
451 ret = i915_do_wait_request(dev, overlay->last_flip_req,
452 interruptible);
453 if (ret != 0)
454 return ret;
455
456 case SWITCH_OFF_STAGE_2:
457 intel_overlay_off_tail(overlay);
458 break;
459 default:
460 BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
461 }
462
463 overlay->hw_wedged = 0;
464 overlay->last_flip_req = 0;
465 return 0;
466}
467
468/* Wait for pending overlay flip and release old frame.
 469 * Needs to be called before the overlay registers are changed
470 * via intel_overlay_(un)map_regs_atomic */
471static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
472{
473 int ret;
474 struct drm_gem_object *obj;
475
476 /* only wait if there is actually an old frame to release to
477 * guarantee forward progress */
478 if (!overlay->old_vid_bo)
479 return 0;
480
481 ret = intel_overlay_wait_flip(overlay);
482 if (ret != 0)
483 return ret;
484
485 obj = overlay->old_vid_bo->obj;
486 i915_gem_object_unpin(obj);
487 drm_gem_object_unreference(obj);
488 overlay->old_vid_bo = NULL;
489
490 return 0;
491}
492
493struct put_image_params {
494 int format;
495 short dst_x;
496 short dst_y;
497 short dst_w;
498 short dst_h;
499 short src_w;
500 short src_scan_h;
501 short src_scan_w;
502 short src_h;
503 short stride_Y;
504 short stride_UV;
505 int offset_Y;
506 int offset_U;
507 int offset_V;
508};
509
510static int packed_depth_bytes(u32 format)
511{
512 switch (format & I915_OVERLAY_DEPTH_MASK) {
513 case I915_OVERLAY_YUV422:
514 return 4;
515 case I915_OVERLAY_YUV411:
516 /* return 6; not implemented */
517 default:
518 return -EINVAL;
519 }
520}
521
522static int packed_width_bytes(u32 format, short width)
523{
524 switch (format & I915_OVERLAY_DEPTH_MASK) {
525 case I915_OVERLAY_YUV422:
526 return width << 1;
527 default:
528 return -EINVAL;
529 }
530}
531
532static int uv_hsubsampling(u32 format)
533{
534 switch (format & I915_OVERLAY_DEPTH_MASK) {
535 case I915_OVERLAY_YUV422:
536 case I915_OVERLAY_YUV420:
537 return 2;
538 case I915_OVERLAY_YUV411:
539 case I915_OVERLAY_YUV410:
540 return 4;
541 default:
542 return -EINVAL;
543 }
544}
545
546static int uv_vsubsampling(u32 format)
547{
548 switch (format & I915_OVERLAY_DEPTH_MASK) {
549 case I915_OVERLAY_YUV420:
550 case I915_OVERLAY_YUV410:
551 return 2;
552 case I915_OVERLAY_YUV422:
553 case I915_OVERLAY_YUV411:
554 return 1;
555 default:
556 return -EINVAL;
557 }
558}
559
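Editor's note: uv_hsubsampling and uv_vsubsampling above return the ratio of luma to chroma samples in each direction, which is what the later setup code divides widths and heights by when programming the UV buffers. A standalone computation of the resulting plane geometry for a 720x480 source (ratios taken from the helpers above, including the 4x2 pair they return for YUV410):

#include <stdio.h>

/* subsampling ratios as returned by the helpers above */
struct fmt { const char *name; int uv_hscale, uv_vscale; };

int main(void)
{
	struct fmt fmts[] = {
		{ "YUV422 planar", 2, 1 },
		{ "YUV420 planar", 2, 2 },
		{ "YUV410 planar", 4, 2 },	/* per uv_vsubsampling above */
	};
	int w = 720, h = 480;
	for (int i = 0; i < 3; i++)
		printf("%s: Y %dx%d, each UV plane %dx%d\n",
		       fmts[i].name, w, h,
		       w / fmts[i].uv_hscale, h / fmts[i].uv_vscale);
	return 0;
}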
560static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
561{
562 u32 mask, shift, ret;
563 if (IS_I9XX(dev)) {
564 mask = 0x3f;
565 shift = 6;
566 } else {
567 mask = 0x1f;
568 shift = 5;
569 }
570 ret = ((offset + width + mask) >> shift) - (offset >> shift);
571 if (IS_I9XX(dev))
572 ret <<= 1;
 573 ret -= 1;
574 return ret << 2;
575}
576
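Editor's note: calc_swidthsw above turns a byte offset/width pair into the SWIDTHSW count of memory fetch units: round the span out to 64-byte units on i9xx (32-byte otherwise), double on i9xx, subtract one, and shift the result into its register field. Tracing offset 0, width 1024 on i9xx: (1087 >> 6) - 0 = 16, doubled to 32, minus one is 31, shifted left two gives 124. The same arithmetic as a standalone check:

#include <stdint.h>
#include <stdio.h>

/* same arithmetic as calc_swidthsw, is_i9xx selecting the 64-byte path */
static uint32_t calc_swidthsw(int is_i9xx, uint32_t offset, uint32_t width)
{
	uint32_t mask  = is_i9xx ? 0x3f : 0x1f;
	uint32_t shift = is_i9xx ? 6 : 5;
	uint32_t ret = ((offset + width + mask) >> shift) - (offset >> shift);
	if (is_i9xx)
		ret <<= 1;
	ret -= 1;
	return ret << 2;
}

int main(void)
{
	printf("i9xx, offset 0, width 1024 -> %u\n",
	       (unsigned)calc_swidthsw(1, 0, 1024));
	printf("i8xx, offset 0, width 1024 -> %u\n",
	       (unsigned)calc_swidthsw(0, 0, 1024));
	return 0;
}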
577static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
578 0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
579 0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
580 0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
581 0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
582 0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
583 0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
584 0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
585 0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
586 0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
587 0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
588 0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
589 0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
590 0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
591 0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
592 0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
593 0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
594 0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
595static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
596 0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
597 0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
598 0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
599 0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
600 0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
601 0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
602 0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
603 0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
604 0x3000, 0x0800, 0x3000};
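/*
 * Layout note: these are static horizontal filter kernels, one set of
 * taps per phase (N_HORIZ_Y_TAPS * N_PHASES = 5 * 17 = 85 entries for
 * Y, N_HORIZ_UV_TAPS * N_PHASES = 3 * 17 = 51 for UV). The 16-bit
 * values are in the overlay hardware's own fixed-point coefficient
 * encoding, not plain integers.
 */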
605
606static void update_polyphase_filter(struct overlay_registers *regs)
607{
608 memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
609 memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
610}
611
612static bool update_scaling_factors(struct intel_overlay *overlay,
613 struct overlay_registers *regs,
614 struct put_image_params *params)
615{
616 /* fixed point with a 12 bit shift */
617 u32 xscale, yscale, xscale_UV, yscale_UV;
618#define FP_SHIFT 12
619#define FRACT_MASK 0xfff
620 bool scale_changed = false;
621 int uv_hscale = uv_hsubsampling(params->format);
622 int uv_vscale = uv_vsubsampling(params->format);
623
624	if (params->dst_w > 1)
625		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
626			/ params->dst_w;
627	else
628		xscale = 1 << FP_SHIFT;
629
630	if (params->dst_h > 1)
631		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
632			/ params->dst_h;
633	else
634		yscale = 1 << FP_SHIFT;
635
636 /*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
637 xscale_UV = xscale/uv_hscale;
638 yscale_UV = yscale/uv_vscale;
639	/* make the Y scale to UV scale ratio an exact multiple */
640 xscale = xscale_UV * uv_hscale;
641 yscale = yscale_UV * uv_vscale;
642 /*} else {
643 xscale_UV = 0;
644 yscale_UV = 0;
645 }*/
646
647 if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
648 scale_changed = true;
649 overlay->old_xscale = xscale;
650 overlay->old_yscale = yscale;
651
652 regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
653 | ((xscale >> FP_SHIFT) << 16)
654 | ((xscale & FRACT_MASK) << 3);
655 regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
656 | ((xscale_UV >> FP_SHIFT) << 16)
657 | ((xscale_UV & FRACT_MASK) << 3);
658 regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
659 | ((yscale_UV >> FP_SHIFT) << 0);
660
661 if (scale_changed)
662 update_polyphase_filter(regs);
663
664 return scale_changed;
665}
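/*
 * Worked example of the 4.12 fixed-point math above (a sketch): for
 * src_scan_w = 720 scaled to dst_w = 1440,
 *   xscale = (719 << 12) / 1440 = 2045  (~0.5 in 4.12),
 * and with 2x horizontal subsampling xscale_UV = 2045 / 2 = 1022,
 * after which xscale is snapped back to 2 * 1022 = 2044 so the Y
 * scale stays an exact multiple of the UV scale.
 */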
666
667static void update_colorkey(struct intel_overlay *overlay,
668 struct overlay_registers *regs)
669{
670 u32 key = overlay->color_key;
671	switch (overlay->crtc->base.fb->bits_per_pixel) {
672	case 8:
673		regs->DCLRKV = 0;
674		regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
		break;
675	case 16:
676		if (overlay->crtc->base.fb->depth == 15) {
677			regs->DCLRKV = RGB15_TO_COLORKEY(key);
678			regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
679		} else {
680			regs->DCLRKV = RGB16_TO_COLORKEY(key);
681			regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
682		}
		break;
683	case 24:
684	case 32:
685		regs->DCLRKV = key;
686		regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
		break;
687	}
688}
689
690static u32 overlay_cmd_reg(struct put_image_params *params)
691{
692 u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
693
694 if (params->format & I915_OVERLAY_YUV_PLANAR) {
695 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
696 case I915_OVERLAY_YUV422:
697 cmd |= OCMD_YUV_422_PLANAR;
698 break;
699 case I915_OVERLAY_YUV420:
700 cmd |= OCMD_YUV_420_PLANAR;
701 break;
702 case I915_OVERLAY_YUV411:
703 case I915_OVERLAY_YUV410:
704 cmd |= OCMD_YUV_410_PLANAR;
705 break;
706 }
707 } else { /* YUV packed */
708 switch (params->format & I915_OVERLAY_DEPTH_MASK) {
709 case I915_OVERLAY_YUV422:
710 cmd |= OCMD_YUV_422_PACKED;
711 break;
712 case I915_OVERLAY_YUV411:
713 cmd |= OCMD_YUV_411_PACKED;
714 break;
715 }
716
717 switch (params->format & I915_OVERLAY_SWAP_MASK) {
718 case I915_OVERLAY_NO_SWAP:
719 break;
720 case I915_OVERLAY_UV_SWAP:
721 cmd |= OCMD_UV_SWAP;
722 break;
723 case I915_OVERLAY_Y_SWAP:
724 cmd |= OCMD_Y_SWAP;
725 break;
726 case I915_OVERLAY_Y_AND_UV_SWAP:
727 cmd |= OCMD_Y_AND_UV_SWAP;
728 break;
729 }
730 }
731
732 return cmd;
733}
734
735int intel_overlay_do_put_image(struct intel_overlay *overlay,
736 struct drm_gem_object *new_bo,
737 struct put_image_params *params)
738{
739 int ret, tmp_width;
740 struct overlay_registers *regs;
741 bool scale_changed = false;
742 struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
743	struct drm_device *dev;
744
745	BUG_ON(!overlay);
	dev = overlay->dev;
746	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
747	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
748
749 ret = intel_overlay_release_old_vid(overlay);
750 if (ret != 0)
751 return ret;
752
753 ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
754 if (ret != 0)
755 return ret;
756
757 ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
758 if (ret != 0)
759 goto out_unpin;
760
761 if (!overlay->active) {
762 regs = intel_overlay_map_regs_atomic(overlay);
763 if (!regs) {
764 ret = -ENOMEM;
765 goto out_unpin;
766 }
767 regs->OCONFIG = OCONF_CC_OUT_8BIT;
768 if (IS_I965GM(overlay->dev))
769 regs->OCONFIG |= OCONF_CSC_MODE_BT709;
770 regs->OCONFIG |= overlay->crtc->pipe == 0 ?
771 OCONF_PIPE_A : OCONF_PIPE_B;
772 intel_overlay_unmap_regs_atomic(overlay);
773
774 ret = intel_overlay_on(overlay);
775 if (ret != 0)
776 goto out_unpin;
777 }
778
779 regs = intel_overlay_map_regs_atomic(overlay);
780 if (!regs) {
781 ret = -ENOMEM;
782 goto out_unpin;
783 }
784
785 regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
786 regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
787
788 if (params->format & I915_OVERLAY_YUV_PACKED)
789 tmp_width = packed_width_bytes(params->format, params->src_w);
790 else
791 tmp_width = params->src_w;
792
793 regs->SWIDTH = params->src_w;
794 regs->SWIDTHSW = calc_swidthsw(overlay->dev,
795 params->offset_Y, tmp_width);
796 regs->SHEIGHT = params->src_h;
797	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
798 regs->OSTRIDE = params->stride_Y;
799
800 if (params->format & I915_OVERLAY_YUV_PLANAR) {
801 int uv_hscale = uv_hsubsampling(params->format);
802 int uv_vscale = uv_vsubsampling(params->format);
803 u32 tmp_U, tmp_V;
804 regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
805 tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
806 params->src_w/uv_hscale);
807 tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
808 params->src_w/uv_hscale);
809 regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
810 regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
811 regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
812 regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
813 regs->OSTRIDE |= params->stride_UV << 16;
814 }
815
816 scale_changed = update_scaling_factors(overlay, regs, params);
817
818 update_colorkey(overlay, regs);
819
820 regs->OCMD = overlay_cmd_reg(params);
821
822 intel_overlay_unmap_regs_atomic(overlay);
823
824 intel_overlay_continue(overlay, scale_changed);
825
826 overlay->old_vid_bo = overlay->vid_bo;
827 overlay->vid_bo = new_bo->driver_private;
828
829 return 0;
830
831out_unpin:
832 i915_gem_object_unpin(new_bo);
833 return ret;
834}
835
836int intel_overlay_switch_off(struct intel_overlay *overlay)
837{
838 int ret;
839 struct overlay_registers *regs;
840 struct drm_device *dev = overlay->dev;
841
842 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
843 BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
844
845 if (overlay->hw_wedged) {
846 ret = intel_overlay_recover_from_interrupt(overlay, 1);
847 if (ret != 0)
848 return ret;
849 }
850
851 if (!overlay->active)
852 return 0;
853
854 ret = intel_overlay_release_old_vid(overlay);
855 if (ret != 0)
856 return ret;
857
858	regs = intel_overlay_map_regs_atomic(overlay);
	if (regs) {
859		regs->OCMD = 0;
860		intel_overlay_unmap_regs_atomic(overlay);
	}
861
862 ret = intel_overlay_off(overlay);
863 if (ret != 0)
864 return ret;
865
866 intel_overlay_off_tail(overlay);
867
868 return 0;
869}
870
871static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
872 struct intel_crtc *crtc)
873{
874 drm_i915_private_t *dev_priv = overlay->dev->dev_private;
875 u32 pipeconf;
876 int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
877
878 if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
879 return -EINVAL;
880
881 pipeconf = I915_READ(pipeconf_reg);
882
883 /* can't use the overlay with double wide pipe */
884 if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
885 return -EINVAL;
886
887 return 0;
888}
889
890static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
891{
892 struct drm_device *dev = overlay->dev;
893 drm_i915_private_t *dev_priv = dev->dev_private;
894 u32 ratio;
895 u32 pfit_control = I915_READ(PFIT_CONTROL);
896
897 /* XXX: This is not the same logic as in the xorg driver, but more in
898 * line with the intel documentation for the i965 */
899 if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
900 ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
901 } else { /* on i965 use the PGM reg to read out the autoscaler values */
902 ratio = I915_READ(PFIT_PGM_RATIOS);
903 if (IS_I965G(dev))
904 ratio >>= PFIT_VERT_SCALE_SHIFT_965;
905 else
906 ratio >>= PFIT_VERT_SCALE_SHIFT;
907 }
908
909 overlay->pfit_vscale_ratio = ratio;
910}
911
912static int check_overlay_dst(struct intel_overlay *overlay,
913 struct drm_intel_overlay_put_image *rec)
914{
915 struct drm_display_mode *mode = &overlay->crtc->base.mode;
916
917 if ((rec->dst_x < mode->crtc_hdisplay)
918 && (rec->dst_x + rec->dst_width
919 <= mode->crtc_hdisplay)
920 && (rec->dst_y < mode->crtc_vdisplay)
921 && (rec->dst_y + rec->dst_height
922 <= mode->crtc_vdisplay))
923 return 0;
924 else
925 return -EINVAL;
926}
927
928static int check_overlay_scaling(struct put_image_params *rec)
929{
930 u32 tmp;
931
932 /* downscaling limit is 8.0 */
933 tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
934 if (tmp > 7)
935 return -EINVAL;
936 tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
937 if (tmp > 7)
938 return -EINVAL;
939
940 return 0;
941}
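/*
 * The shift dance is just an integer ratio with 16.16 intermediate
 * precision: tmp = floor(src_scan_h / dst_h), so tmp > 7 rejects any
 * downscale of 8.0x or more. E.g. src_scan_h = 1024 with dst_h = 128
 * gives tmp = 8 (-EINVAL), while dst_h = 129 gives tmp = 7 and passes.
 */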
942
943static int check_overlay_src(struct drm_device *dev,
944 struct drm_intel_overlay_put_image *rec,
945 struct drm_gem_object *new_bo)
946{
947 u32 stride_mask;
948 int depth;
949 int uv_hscale = uv_hsubsampling(rec->flags);
950 int uv_vscale = uv_vsubsampling(rec->flags);
951 size_t tmp;
952
953 /* check src dimensions */
954 if (IS_845G(dev) || IS_I830(dev)) {
955 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
956 || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
957 return -EINVAL;
958 } else {
959 if (rec->src_height > IMAGE_MAX_HEIGHT
960 || rec->src_width > IMAGE_MAX_WIDTH)
961 return -EINVAL;
962 }
963 /* better safe than sorry, use 4 as the maximal subsampling ratio */
964 if (rec->src_height < N_VERT_Y_TAPS*4
965 || rec->src_width < N_HORIZ_Y_TAPS*4)
966 return -EINVAL;
967
968	/* check alignment constraints */
969 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
970 case I915_OVERLAY_RGB:
971 /* not implemented */
972 return -EINVAL;
973 case I915_OVERLAY_YUV_PACKED:
974 depth = packed_depth_bytes(rec->flags);
975 if (uv_vscale != 1)
976 return -EINVAL;
977 if (depth < 0)
978 return depth;
979 /* ignore UV planes */
980 rec->stride_UV = 0;
981 rec->offset_U = 0;
982 rec->offset_V = 0;
983 /* check pixel alignment */
984 if (rec->offset_Y % depth)
985 return -EINVAL;
986 break;
987 case I915_OVERLAY_YUV_PLANAR:
988 if (uv_vscale < 0 || uv_hscale < 0)
989 return -EINVAL;
990 /* no offset restrictions for planar formats */
991 break;
992 default:
993 return -EINVAL;
994 }
995
996 if (rec->src_width % uv_hscale)
997 return -EINVAL;
998
999 /* stride checking */
1000 stride_mask = 63;
1001
1002 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1003 return -EINVAL;
1004 if (IS_I965G(dev) && rec->stride_Y < 512)
1005 return -EINVAL;
1006
1007 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
1008 4 : 8;
1009 if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
1010 return -EINVAL;
1011
1012 /* check buffer dimensions */
1013 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
1014 case I915_OVERLAY_RGB:
1015 case I915_OVERLAY_YUV_PACKED:
1016 /* always 4 Y values per depth pixels */
1017 if (packed_width_bytes(rec->flags, rec->src_width)
1018 > rec->stride_Y)
1019 return -EINVAL;
1020
1021 tmp = rec->stride_Y*rec->src_height;
1022 if (rec->offset_Y + tmp > new_bo->size)
1023 return -EINVAL;
1024 break;
1025 case I915_OVERLAY_YUV_PLANAR:
1026 if (rec->src_width > rec->stride_Y)
1027 return -EINVAL;
1028 if (rec->src_width/uv_hscale > rec->stride_UV)
1029 return -EINVAL;
1030
1031 tmp = rec->stride_Y*rec->src_height;
1032 if (rec->offset_Y + tmp > new_bo->size)
1033 return -EINVAL;
1034 tmp = rec->stride_UV*rec->src_height;
1035 tmp /= uv_vscale;
1036 if (rec->offset_U + tmp > new_bo->size
1037 || rec->offset_V + tmp > new_bo->size)
1038 return -EINVAL;
1039 break;
1040 }
1041
1042 return 0;
1043}
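/*
 * Example of the buffer-fit checks above (a sketch, not exhaustive):
 * a packed YUV422 source of 720x480 needs packed_width_bytes() =
 * 2 * 720 = 1440 <= stride_Y, and offset_Y + stride_Y * 480 must not
 * run past new_bo->size. Planar formats check the U and V planes the
 * same way, with the row count divided by the vertical subsampling.
 */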
1044
1045int intel_overlay_put_image(struct drm_device *dev, void *data,
1046 struct drm_file *file_priv)
1047{
1048 struct drm_intel_overlay_put_image *put_image_rec = data;
1049 drm_i915_private_t *dev_priv = dev->dev_private;
1050 struct intel_overlay *overlay;
1051 struct drm_mode_object *drmmode_obj;
1052 struct intel_crtc *crtc;
1053 struct drm_gem_object *new_bo;
1054 struct put_image_params *params;
1055 int ret;
1056
1057 if (!dev_priv) {
1058 DRM_ERROR("called with no initialization\n");
1059 return -EINVAL;
1060 }
1061
1062 overlay = dev_priv->overlay;
1063 if (!overlay) {
1064 DRM_DEBUG("userspace bug: no overlay\n");
1065 return -ENODEV;
1066 }
1067
1068 if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
1069 mutex_lock(&dev->mode_config.mutex);
1070 mutex_lock(&dev->struct_mutex);
1071
1072 ret = intel_overlay_switch_off(overlay);
1073
1074 mutex_unlock(&dev->struct_mutex);
1075 mutex_unlock(&dev->mode_config.mutex);
1076
1077 return ret;
1078 }
1079
1080 params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
1081 if (!params)
1082 return -ENOMEM;
1083
1084	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
1085			DRM_MODE_OBJECT_CRTC);
1086	if (!drmmode_obj) {
		kfree(params);
1087		return -ENOENT;
	}
1088	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
1089
1090	new_bo = drm_gem_object_lookup(dev, file_priv,
1091			put_image_rec->bo_handle);
1092	if (!new_bo) {
		kfree(params);
1093		return -ENOENT;
	}
1094
1095 mutex_lock(&dev->mode_config.mutex);
1096 mutex_lock(&dev->struct_mutex);
1097
1098 if (overlay->hw_wedged) {
1099 ret = intel_overlay_recover_from_interrupt(overlay, 1);
1100 if (ret != 0)
1101 goto out_unlock;
1102 }
1103
1104 if (overlay->crtc != crtc) {
1105 struct drm_display_mode *mode = &crtc->base.mode;
1106 ret = intel_overlay_switch_off(overlay);
1107 if (ret != 0)
1108 goto out_unlock;
1109
1110 ret = check_overlay_possible_on_crtc(overlay, crtc);
1111 if (ret != 0)
1112 goto out_unlock;
1113
1114 overlay->crtc = crtc;
1115 crtc->overlay = overlay;
1116
1117 if (intel_panel_fitter_pipe(dev) == crtc->pipe
1118	    /* and line too wide, i.e. one-line-mode */
1119 && mode->hdisplay > 1024) {
1120 overlay->pfit_active = 1;
1121 update_pfit_vscale_ratio(overlay);
1122 } else
1123 overlay->pfit_active = 0;
1124 }
1125
1126 ret = check_overlay_dst(overlay, put_image_rec);
1127 if (ret != 0)
1128 goto out_unlock;
1129
1130 if (overlay->pfit_active) {
1131 params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
1132 overlay->pfit_vscale_ratio);
1133 /* shifting right rounds downwards, so add 1 */
1134 params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
1135 overlay->pfit_vscale_ratio) + 1;
1136 } else {
1137 params->dst_y = put_image_rec->dst_y;
1138 params->dst_h = put_image_rec->dst_height;
1139 }
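	/*
	 * Example of the mapping above (a sketch): with a 4.12-style
	 * pfit_vscale_ratio of 0x1800 (a 1.5x panel upscale), a requested
	 * dst_y of 300 becomes (300 << 12) / 0x1800 = 200 panel-fitter
	 * input lines; dst_h gets the +1 because the division truncates
	 * downwards.
	 */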
1140 params->dst_x = put_image_rec->dst_x;
1141 params->dst_w = put_image_rec->dst_width;
1142
1143 params->src_w = put_image_rec->src_width;
1144 params->src_h = put_image_rec->src_height;
1145 params->src_scan_w = put_image_rec->src_scan_width;
1146 params->src_scan_h = put_image_rec->src_scan_height;
1147 if (params->src_scan_h > params->src_h
1148 || params->src_scan_w > params->src_w) {
1149 ret = -EINVAL;
1150 goto out_unlock;
1151 }
1152
1153 ret = check_overlay_src(dev, put_image_rec, new_bo);
1154 if (ret != 0)
1155 goto out_unlock;
1156 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
1157 params->stride_Y = put_image_rec->stride_Y;
1158 params->stride_UV = put_image_rec->stride_UV;
1159 params->offset_Y = put_image_rec->offset_Y;
1160 params->offset_U = put_image_rec->offset_U;
1161 params->offset_V = put_image_rec->offset_V;
1162
1163 /* Check scaling after src size to prevent a divide-by-zero. */
1164 ret = check_overlay_scaling(params);
1165 if (ret != 0)
1166 goto out_unlock;
1167
1168 ret = intel_overlay_do_put_image(overlay, new_bo, params);
1169 if (ret != 0)
1170 goto out_unlock;
1171
1172 mutex_unlock(&dev->struct_mutex);
1173 mutex_unlock(&dev->mode_config.mutex);
1174
1175 kfree(params);
1176
1177 return 0;
1178
1179out_unlock:
1180 mutex_unlock(&dev->struct_mutex);
1181 mutex_unlock(&dev->mode_config.mutex);
1182 drm_gem_object_unreference(new_bo);
1183 kfree(params);
1184
1185 return ret;
1186}
1187
1188static void update_reg_attrs(struct intel_overlay *overlay,
1189 struct overlay_registers *regs)
1190{
1191 regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
1192 regs->OCLRC1 = overlay->saturation;
1193}
1194
1195static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
1196{
1197 int i;
1198
1199 if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
1200 return false;
1201
1202 for (i = 0; i < 3; i++) {
1203 if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
1204 return false;
1205 }
1206
1207 return true;
1208}
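/*
 * Each gamma register packs three 8-bit channel values into its low
 * 24 bits (the top byte must stay clear), and the loop demands a
 * strictly increasing ramp per channel. E.g. gamma0 = 0x101010
 * followed by gamma1 = 0x0f2020 fails, since the topmost channel
 * byte goes from 0x10 down to 0x0f.
 */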
1209
1210static bool check_gamma5_errata(u32 gamma5)
1211{
1212 int i;
1213
1214 for (i = 0; i < 3; i++) {
1215 if (((gamma5 >> i*8) & 0xff) == 0x80)
1216 return false;
1217 }
1218
1219 return true;
1220}
1221
1222static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1223{
1224 if (!check_gamma_bounds(0, attrs->gamma0)
1225 || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
1226 || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
1227 || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
1228 || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
1229 || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
1230 || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
1231 return -EINVAL;
1232 if (!check_gamma5_errata(attrs->gamma5))
1233 return -EINVAL;
1234 return 0;
1235}
1236
1237int intel_overlay_attrs(struct drm_device *dev, void *data,
1238 struct drm_file *file_priv)
1239{
1240 struct drm_intel_overlay_attrs *attrs = data;
1241 drm_i915_private_t *dev_priv = dev->dev_private;
1242 struct intel_overlay *overlay;
1243 struct overlay_registers *regs;
1244 int ret;
1245
1246 if (!dev_priv) {
1247 DRM_ERROR("called with no initialization\n");
1248 return -EINVAL;
1249 }
1250
1251 overlay = dev_priv->overlay;
1252 if (!overlay) {
1253 DRM_DEBUG("userspace bug: no overlay\n");
1254 return -ENODEV;
1255 }
1256
1257 mutex_lock(&dev->mode_config.mutex);
1258 mutex_lock(&dev->struct_mutex);
1259
1260 if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
1261 attrs->color_key = overlay->color_key;
1262 attrs->brightness = overlay->brightness;
1263 attrs->contrast = overlay->contrast;
1264 attrs->saturation = overlay->saturation;
1265
1266 if (IS_I9XX(dev)) {
1267 attrs->gamma0 = I915_READ(OGAMC0);
1268 attrs->gamma1 = I915_READ(OGAMC1);
1269 attrs->gamma2 = I915_READ(OGAMC2);
1270 attrs->gamma3 = I915_READ(OGAMC3);
1271 attrs->gamma4 = I915_READ(OGAMC4);
1272 attrs->gamma5 = I915_READ(OGAMC5);
1273 }
1274 ret = 0;
1275 } else {
1276 overlay->color_key = attrs->color_key;
1277 if (attrs->brightness >= -128 && attrs->brightness <= 127) {
1278 overlay->brightness = attrs->brightness;
1279 } else {
1280 ret = -EINVAL;
1281 goto out_unlock;
1282 }
1283 if (attrs->contrast <= 255) {
1284 overlay->contrast = attrs->contrast;
1285 } else {
1286 ret = -EINVAL;
1287 goto out_unlock;
1288 }
1289 if (attrs->saturation <= 1023) {
1290 overlay->saturation = attrs->saturation;
1291 } else {
1292 ret = -EINVAL;
1293 goto out_unlock;
1294 }
1295
1296 regs = intel_overlay_map_regs_atomic(overlay);
1297 if (!regs) {
1298 ret = -ENOMEM;
1299 goto out_unlock;
1300 }
1301
1302 update_reg_attrs(overlay, regs);
1303
1304 intel_overlay_unmap_regs_atomic(overlay);
1305
1306 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1307 if (!IS_I9XX(dev)) {
1308 ret = -EINVAL;
1309 goto out_unlock;
1310 }
1311
1312 if (overlay->active) {
1313 ret = -EBUSY;
1314 goto out_unlock;
1315 }
1316
1317 ret = check_gamma(attrs);
1318 if (ret != 0)
1319 goto out_unlock;
1320
1321 I915_WRITE(OGAMC0, attrs->gamma0);
1322 I915_WRITE(OGAMC1, attrs->gamma1);
1323 I915_WRITE(OGAMC2, attrs->gamma2);
1324 I915_WRITE(OGAMC3, attrs->gamma3);
1325 I915_WRITE(OGAMC4, attrs->gamma4);
1326 I915_WRITE(OGAMC5, attrs->gamma5);
1327 }
1328 ret = 0;
1329 }
1330
1331out_unlock:
1332 mutex_unlock(&dev->struct_mutex);
1333 mutex_unlock(&dev->mode_config.mutex);
1334
1335 return ret;
1336}
1337
1338void intel_setup_overlay(struct drm_device *dev)
1339{
1340 drm_i915_private_t *dev_priv = dev->dev_private;
1341 struct intel_overlay *overlay;
1342 struct drm_gem_object *reg_bo;
1343 struct overlay_registers *regs;
1344 int ret;
1345
1346 if (!OVERLAY_EXISTS(dev))
1347 return;
1348
1349 overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
1350 if (!overlay)
1351 return;
1352 overlay->dev = dev;
1353
1354 reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
1355 if (!reg_bo)
1356 goto out_free;
1357 overlay->reg_bo = reg_bo->driver_private;
1358
1359 if (OVERLAY_NONPHYSICAL(dev)) {
1360 ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
1361 if (ret) {
1362 DRM_ERROR("failed to pin overlay register bo\n");
1363 goto out_free_bo;
1364 }
1365 overlay->flip_addr = overlay->reg_bo->gtt_offset;
1366 } else {
1367 ret = i915_gem_attach_phys_object(dev, reg_bo,
1368 I915_GEM_PHYS_OVERLAY_REGS);
1369 if (ret) {
1370 DRM_ERROR("failed to attach phys overlay regs\n");
1371 goto out_free_bo;
1372 }
1373 overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
1374 }
1375
1376 /* init all values */
1377 overlay->color_key = 0x0101fe;
1378 overlay->brightness = -19;
1379 overlay->contrast = 75;
1380 overlay->saturation = 146;
1381
1382 regs = intel_overlay_map_regs_atomic(overlay);
1383 if (!regs)
1384 goto out_free_bo;
1385
1386 memset(regs, 0, sizeof(struct overlay_registers));
1387 update_polyphase_filter(regs);
1388
1389 update_reg_attrs(overlay, regs);
1390
1391 intel_overlay_unmap_regs_atomic(overlay);
1392
1393 dev_priv->overlay = overlay;
1394 DRM_INFO("initialized overlay support\n");
1395 return;
1396
1397out_free_bo:
1398 drm_gem_object_unreference(reg_bo);
1399out_free:
1400 kfree(overlay);
1401 return;
1402}
1403
1404void intel_cleanup_overlay(struct drm_device *dev)
1405{
1406 drm_i915_private_t *dev_priv = dev->dev_private;
1407
1408 if (dev_priv->overlay) {
1409		/* The BOs should already have been freed by the generic code.
1410		 * Furthermore, modesetting teardown happens beforehand, so the
1411		 * hardware should be off already */
1412 BUG_ON(dev_priv->overlay->active);
1413
1414 kfree(dev_priv->overlay);
1415 }
1416}
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7fa3279e2f8..24a3dc99716c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,8 +36,6 @@
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
 
-#undef SDVO_DEBUG
-
 static char *tv_format_names[] = {
 	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
 	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
 #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
 #define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
 
-#ifdef SDVO_DEBUG
 static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 				   void *args, int args_len)
 {
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 	DRM_LOG_KMS("(%02X)", cmd);
 	DRM_LOG_KMS("\n");
 }
-#else
-#define intel_sdvo_debug_write(o, c, a, l)
-#endif
 
 static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
 				 void *args, int args_len)
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
 	intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
 }
 
-#ifdef SDVO_DEBUG
 static const char *cmd_status_names[] = {
 	"Power on",
 	"Success",
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
 	DRM_LOG_KMS("(??? %d)", status);
 	DRM_LOG_KMS("\n");
 }
-#else
-#define intel_sdvo_debug_response(o, r, l, s)
-#endif
 
 static u8 intel_sdvo_read_response(struct intel_output *intel_output,
 				   void *response, int response_len)
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 
 	intel_sdvo_write_cmd(intel_output,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+	if (sdvo_priv->is_tv) {
+		/* add 30ms delay when the output type is SDVO-TV */
+		mdelay(30);
+	}
 	status = intel_sdvo_read_response(intel_output, &response, 2);
 
 	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 9ca917931afb..552ec110b741 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		tv_ctl |= TV_TRILEVEL_SYNC;
 	if (tv_mode->pal_burst)
 		tv_ctl |= TV_PAL_BURST;
+
 	scctl1 = 0;
-	/* dda1 implies valid video levels */
-	if (tv_mode->dda1_inc) {
+	if (tv_mode->dda1_inc)
 		scctl1 |= TV_SC_DDA1_EN;
-	}
-
 	if (tv_mode->dda2_inc)
 		scctl1 |= TV_SC_DDA2_EN;
-
 	if (tv_mode->dda3_inc)
 		scctl1 |= TV_SC_DDA3_EN;
-
 	scctl1 |= tv_mode->sc_reset;
-	scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+	if (video_levels)
+		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
 
 	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
 	 *    0 0 0 Component
 	 */
 	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG("Detected Composite TV connection\n");
+		DRM_DEBUG_KMS("Detected Composite TV connection\n");
 		type = DRM_MODE_CONNECTOR_Composite;
 	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG("Detected S-Video TV connection\n");
+		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
 		type = DRM_MODE_CONNECTOR_SVIDEO;
 	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG("Detected Component TV connection\n");
+		DRM_DEBUG_KMS("Detected Component TV connection\n");
 		type = DRM_MODE_CONNECTOR_Component;
 	} else {
-		DRM_DEBUG("No TV connection detected\n");
+		DRM_DEBUG_KMS("No TV connection detected\n");
 		type = -1;
 	}
 
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 	.destroy = intel_tv_enc_destroy,
 };
 
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, assume that the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not TV, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+			p_child->device_type != DEVICE_TYPE_TV)
+			continue;
+		/* Only when the addin_offset is non-zero, it is regarded
+		 * as present.
+		 */
+		if (p_child->addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
 
 void
 intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
 		return;
 
+	if (!tv_is_present_in_vbt(dev)) {
+		DRM_DEBUG_KMS("Integrated TV is not present.\n");
+		return;
+	}
 	/* Even if we have an encoder we may not have a connector */
 	if (!dev_priv->int_tv_support)
 		return;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
new file mode 100644
index 000000000000..d823e6319516
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -0,0 +1,44 @@
1config DRM_NOUVEAU
2 tristate "Nouveau (nVidia) cards"
3 depends on DRM
4 select FW_LOADER
5 select DRM_KMS_HELPER
6 select DRM_TTM
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 select FB
11 select FRAMEBUFFER_CONSOLE if !EMBEDDED
12 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
13 help
14 Choose this option for open-source nVidia support.
15
16config DRM_NOUVEAU_BACKLIGHT
17 bool "Support for backlight control"
18 depends on DRM_NOUVEAU
19 default y
20 help
21 Say Y here if you want to control the backlight of your display
22 (e.g. a laptop panel).
23
24config DRM_NOUVEAU_DEBUG
25 bool "Build in Nouveau's debugfs support"
26 depends on DRM_NOUVEAU && DEBUG_FS
27 default y
28 help
29 Say Y here if you want Nouveau to output debugging information
30 via debugfs.
31
32menu "I2C encoder or helper chips"
33 depends on DRM
34
35config DRM_I2C_CH7006
36 tristate "Chrontel ch7006 TV encoder"
37 default m if DRM_NOUVEAU
38 help
39 Support for Chrontel ch7006 and similar TV encoders, found
40 on some nVidia video cards.
41
42 This driver is currently only useful if you're also using
43 the nouveau driver.
44endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 000000000000..1d90d4d0144f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,31 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5ccflags-y := -Iinclude/drm
6nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
7 nouveau_object.o nouveau_irq.o nouveau_notifier.o \
8 nouveau_sgdma.o nouveau_dma.o \
9 nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
10 nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
11 nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
12 nouveau_dp.o \
13 nv04_timer.o \
14 nv04_mc.o nv40_mc.o nv50_mc.o \
15 nv04_fb.o nv10_fb.o nv40_fb.o \
16 nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
17 nv04_graph.o nv10_graph.o nv20_graph.o \
18 nv40_graph.o nv50_graph.o \
19 nv04_instmem.o nv50_instmem.o \
20 nv50_crtc.o nv50_dac.o nv50_sor.o \
21 nv50_cursor.o nv50_display.o nv50_fbcon.o \
22 nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
23 nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
24 nv17_gpio.o
25
26nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
27nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
28nouveau-$(CONFIG_DRM_NOUVEAU_BACKLIGHT) += nouveau_backlight.o
29nouveau-$(CONFIG_ACPI) += nouveau_acpi.o
30
31obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
new file mode 100644
index 000000000000..1cf488247a16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -0,0 +1,125 @@
1#include <linux/pci.h>
2#include <linux/acpi.h>
3#include <acpi/acpi_drivers.h>
4#include <acpi/acpi_bus.h>
5
6#include "drmP.h"
7#include "drm.h"
8#include "drm_sarea.h"
9#include "drm_crtc_helper.h"
10#include "nouveau_drv.h"
11#include "nouveau_drm.h"
12#include "nv50_display.h"
13
14#define NOUVEAU_DSM_SUPPORTED 0x00
15#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
16
17#define NOUVEAU_DSM_ACTIVE 0x01
18#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
19
20#define NOUVEAU_DSM_LED 0x02
21#define NOUVEAU_DSM_LED_STATE 0x00
22#define NOUVEAU_DSM_LED_OFF 0x10
23#define NOUVEAU_DSM_LED_STAMINA 0x11
24#define NOUVEAU_DSM_LED_SPEED 0x12
25
26#define NOUVEAU_DSM_POWER 0x03
27#define NOUVEAU_DSM_POWER_STATE 0x00
28#define NOUVEAU_DSM_POWER_SPEED 0x01
29#define NOUVEAU_DSM_POWER_STAMINA 0x02
30
31static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
32{
33 static char muid[] = {
34 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
35 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
36 };
37
38 struct pci_dev *pdev = dev->pdev;
39 struct acpi_handle *handle;
40 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
41 struct acpi_object_list input;
42 union acpi_object params[4];
43 union acpi_object *obj;
44 int err;
45
46 handle = DEVICE_ACPI_HANDLE(&pdev->dev);
47
48 if (!handle)
49 return -ENODEV;
50
51 input.count = 4;
52 input.pointer = params;
53 params[0].type = ACPI_TYPE_BUFFER;
54 params[0].buffer.length = sizeof(muid);
55 params[0].buffer.pointer = (char *)muid;
56 params[1].type = ACPI_TYPE_INTEGER;
57 params[1].integer.value = 0x00000102;
58 params[2].type = ACPI_TYPE_INTEGER;
59 params[2].integer.value = func;
60 params[3].type = ACPI_TYPE_INTEGER;
61 params[3].integer.value = arg;
62
63 err = acpi_evaluate_object(handle, "_DSM", &input, &output);
64 if (err) {
65 NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
66 return err;
67 }
68
69 obj = (union acpi_object *)output.pointer;
70
71	if (obj->type == ACPI_TYPE_INTEGER &&
72	    obj->integer.value == 0x80000002) {
73		kfree(output.pointer);
		return -ENODEV;
	}
74
75 if (obj->type == ACPI_TYPE_BUFFER) {
76 if (obj->buffer.length == 4 && result) {
77 *result = 0;
78 *result |= obj->buffer.pointer[0];
79 *result |= (obj->buffer.pointer[1] << 8);
80 *result |= (obj->buffer.pointer[2] << 16);
81 *result |= (obj->buffer.pointer[3] << 24);
82 }
83 }
84
85 kfree(output.pointer);
86 return 0;
87}
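/*
 * For reference (a sketch of the ACPI side): _DSM conventionally takes
 * a 16-byte UUID, a revision ID (0x102 here), a function index and an
 * argument. A 4-byte buffer result is little-endian, which is what the
 * byte-by-byte shifts above reassemble into *result; an integer result
 * of 0x80000002 is treated as "function not supported".
 */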
88
89int nouveau_hybrid_setup(struct drm_device *dev)
90{
91 int result;
92
93 if (nouveau_dsm(dev, NOUVEAU_DSM_ACTIVE, NOUVEAU_DSM_ACTIVE_QUERY,
94 &result))
95 return -ENODEV;
96
97 NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
98
99 if (result & 0x1) { /* Stamina mode - disable the external GPU */
100 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
101 NULL);
102 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
103 NULL);
104 } else { /* Ensure that the external GPU is enabled */
105 nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
106 nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
107 NULL);
108 }
109
110 return 0;
111}
112
113bool nouveau_dsm_probe(struct drm_device *dev)
114{
115 int support = 0;
116
117 if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
118 NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
119 return false;
120
121 if (!support)
122 return false;
123
124 return true;
125}
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
new file mode 100644
index 000000000000..20564f8cb0ec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 2009 Red Hat <mjg@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Matthew Garrett <mjg@redhat.com>
29 *
30 * Register locations derived from NVClock by Roderick Colenbrander
31 */
32
33#include <linux/backlight.h>
34
35#include "drmP.h"
36#include "nouveau_drv.h"
37#include "nouveau_drm.h"
38#include "nouveau_reg.h"
39
40static int nv40_get_intensity(struct backlight_device *bd)
41{
42 struct drm_device *dev = bl_get_data(bd);
43 int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
44 >> 16;
45
46 return val;
47}
48
49static int nv40_set_intensity(struct backlight_device *bd)
50{
51 struct drm_device *dev = bl_get_data(bd);
52 int val = bd->props.brightness;
53 int reg = nv_rd32(dev, NV40_PMC_BACKLIGHT);
54
55 nv_wr32(dev, NV40_PMC_BACKLIGHT,
56 (val << 16) | (reg & ~NV40_PMC_BACKLIGHT_MASK));
57
58 return 0;
59}
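/*
 * NV40 keeps the backlight level in the field covered by
 * NV40_PMC_BACKLIGHT_MASK (shifted up by 16), so the two helpers above
 * are plain read-modify-write accessors: get shifts the field down,
 * set merges the new field into the untouched rest of the register.
 */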
60
61static struct backlight_ops nv40_bl_ops = {
62 .options = BL_CORE_SUSPENDRESUME,
63 .get_brightness = nv40_get_intensity,
64 .update_status = nv40_set_intensity,
65};
66
67static int nv50_get_intensity(struct backlight_device *bd)
68{
69 struct drm_device *dev = bl_get_data(bd);
70
71 return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
72}
73
74static int nv50_set_intensity(struct backlight_device *bd)
75{
76 struct drm_device *dev = bl_get_data(bd);
77 int val = bd->props.brightness;
78
79 nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
80 val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
81 return 0;
82}
83
84static struct backlight_ops nv50_bl_ops = {
85 .options = BL_CORE_SUSPENDRESUME,
86 .get_brightness = nv50_get_intensity,
87 .update_status = nv50_set_intensity,
88};
89
90static int nouveau_nv40_backlight_init(struct drm_device *dev)
91{
92 struct drm_nouveau_private *dev_priv = dev->dev_private;
93 struct backlight_device *bd;
94
95 if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
96 return 0;
97
98 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
99 &nv40_bl_ops);
100 if (IS_ERR(bd))
101 return PTR_ERR(bd);
102
103 dev_priv->backlight = bd;
104 bd->props.max_brightness = 31;
105 bd->props.brightness = nv40_get_intensity(bd);
106 backlight_update_status(bd);
107
108 return 0;
109}
110
111static int nouveau_nv50_backlight_init(struct drm_device *dev)
112{
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 struct backlight_device *bd;
115
116 if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
117 return 0;
118
119 bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
120 &nv50_bl_ops);
121 if (IS_ERR(bd))
122 return PTR_ERR(bd);
123
124 dev_priv->backlight = bd;
125 bd->props.max_brightness = 1025;
126 bd->props.brightness = nv50_get_intensity(bd);
127 backlight_update_status(bd);
128 return 0;
129}
130
131int nouveau_backlight_init(struct drm_device *dev)
132{
133 struct drm_nouveau_private *dev_priv = dev->dev_private;
134
135 switch (dev_priv->card_type) {
136 case NV_40:
137 return nouveau_nv40_backlight_init(dev);
138 case NV_50:
139 return nouveau_nv50_backlight_init(dev);
140 default:
141 break;
142 }
143
144 return 0;
145}
146
147void nouveau_backlight_exit(struct drm_device *dev)
148{
149 struct drm_nouveau_private *dev_priv = dev->dev_private;
150
151 if (dev_priv->backlight) {
152 backlight_device_unregister(dev_priv->backlight);
153 dev_priv->backlight = NULL;
154 }
155}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
new file mode 100644
index 000000000000..5eec5ed69489
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -0,0 +1,6095 @@
1/*
2 * Copyright 2005-2006 Erik Waling
3 * Copyright 2006 Stephane Marchesin
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "drmP.h"
26#define NV_DEBUG_NOTRACE
27#include "nouveau_drv.h"
28#include "nouveau_hw.h"
29
30/* these defines are made up */
31#define NV_CIO_CRE_44_HEADA 0x0
32#define NV_CIO_CRE_44_HEADB 0x3
33#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
34#define LEGACY_I2C_CRT 0x80
35#define LEGACY_I2C_PANEL 0x81
36#define LEGACY_I2C_TV 0x82
37
38#define EDID1_LEN 128
39
40#define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
41#define LOG_OLD_VALUE(x)
42
43#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
44#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
45
46struct init_exec {
47 bool execute;
48 bool repeat;
49};
50
51static bool nv_cksum(const uint8_t *data, unsigned int length)
52{
53 /*
53	 * There are a few checksums in the BIOS, so here's a generic checking
55 * function.
56 */
57 int i;
58 uint8_t sum = 0;
59
60 for (i = 0; i < length; i++)
61 sum += data[i];
62
63 if (sum)
64 return true;
65
66 return false;
67}
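/*
 * This is the standard PC option-ROM rule: every byte of the image
 * must sum to zero mod 256. Callers pass data[2] * 512 as the length
 * because byte 2 of the ROM header holds the image size in 512-byte
 * blocks, e.g. data[2] == 0x80 means a 64KiB image.
 */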
68
69static int
70score_vbios(struct drm_device *dev, const uint8_t *data, const bool writeable)
71{
72 if (!(data[0] == 0x55 && data[1] == 0xAA)) {
73 NV_TRACEWARN(dev, "... BIOS signature not found\n");
74 return 0;
75 }
76
77 if (nv_cksum(data, data[2] * 512)) {
78 NV_TRACEWARN(dev, "... BIOS checksum invalid\n");
79 /* if a ro image is somewhat bad, it's probably all rubbish */
80 return writeable ? 2 : 1;
81 } else
82 NV_TRACE(dev, "... appears to be valid\n");
83
84 return 3;
85}
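/*
 * The return value is a small preference score used by NVShadowVBIOS()
 * below: 3 = signature and checksum both good, 2 = bad checksum from a
 * writable shadow (possibly fixable), 1 = bad checksum from a read-only
 * source, 0 = no 0x55AA signature at all. The shadowing loop first
 * demands a perfect 3, then settles for the best lower score it saw.
 */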
86
87static void load_vbios_prom(struct drm_device *dev, uint8_t *data)
88{
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 uint32_t pci_nv_20, save_pci_nv_20;
91 int pcir_ptr;
92 int i;
93
94 if (dev_priv->card_type >= NV_50)
95 pci_nv_20 = 0x88050;
96 else
97 pci_nv_20 = NV_PBUS_PCI_NV_20;
98
99 /* enable ROM access */
100 save_pci_nv_20 = nvReadMC(dev, pci_nv_20);
101 nvWriteMC(dev, pci_nv_20,
102 save_pci_nv_20 & ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
103
104 /* bail if no rom signature */
105 if (nv_rd08(dev, NV_PROM_OFFSET) != 0x55 ||
106 nv_rd08(dev, NV_PROM_OFFSET + 1) != 0xaa)
107 goto out;
108
109 /* additional check (see note below) - read PCI record header */
110 pcir_ptr = nv_rd08(dev, NV_PROM_OFFSET + 0x18) |
111 nv_rd08(dev, NV_PROM_OFFSET + 0x19) << 8;
112 if (nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr) != 'P' ||
113 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 1) != 'C' ||
114 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 2) != 'I' ||
115 nv_rd08(dev, NV_PROM_OFFSET + pcir_ptr + 3) != 'R')
116 goto out;
117
118	/* on some 6600GT/6800LE prom reads are messed up.  nvclock alleges
119	 * a good read may be obtained by waiting or re-reading (cargocult: 5x)
120	 * each byte.  we'll hope pramin has something usable instead
121	 */
122 for (i = 0; i < NV_PROM_SIZE; i++)
123 data[i] = nv_rd08(dev, NV_PROM_OFFSET + i);
124
125out:
126 /* disable ROM access */
127 nvWriteMC(dev, pci_nv_20,
128 save_pci_nv_20 | NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
129}
130
131static void load_vbios_pramin(struct drm_device *dev, uint8_t *data)
132{
133 struct drm_nouveau_private *dev_priv = dev->dev_private;
134 uint32_t old_bar0_pramin = 0;
135 int i;
136
137 if (dev_priv->card_type >= NV_50) {
138 uint32_t vbios_vram = (nv_rd32(dev, 0x619f04) & ~0xff) << 8;
139
140 if (!vbios_vram)
141 vbios_vram = (nv_rd32(dev, 0x1700) << 16) + 0xf0000;
142
143 old_bar0_pramin = nv_rd32(dev, 0x1700);
144 nv_wr32(dev, 0x1700, vbios_vram >> 16);
145 }
146
147 /* bail if no rom signature */
148 if (nv_rd08(dev, NV_PRAMIN_OFFSET) != 0x55 ||
149 nv_rd08(dev, NV_PRAMIN_OFFSET + 1) != 0xaa)
150 goto out;
151
152 for (i = 0; i < NV_PROM_SIZE; i++)
153 data[i] = nv_rd08(dev, NV_PRAMIN_OFFSET + i);
154
155out:
156 if (dev_priv->card_type >= NV_50)
157 nv_wr32(dev, 0x1700, old_bar0_pramin);
158}
159
160static void load_vbios_pci(struct drm_device *dev, uint8_t *data)
161{
162 void __iomem *rom = NULL;
163 size_t rom_len;
164 int ret;
165
166 ret = pci_enable_rom(dev->pdev);
167 if (ret)
168 return;
169
170 rom = pci_map_rom(dev->pdev, &rom_len);
171 if (!rom)
172 goto out;
173 memcpy_fromio(data, rom, rom_len);
174 pci_unmap_rom(dev->pdev, rom);
175
176out:
177 pci_disable_rom(dev->pdev);
178}
179
180struct methods {
181 const char desc[8];
182 void (*loadbios)(struct drm_device *, uint8_t *);
183 const bool rw;
184 int score;
185};
186
187static struct methods nv04_methods[] = {
188 { "PROM", load_vbios_prom, false },
189 { "PRAMIN", load_vbios_pramin, true },
190 { "PCIROM", load_vbios_pci, true },
191 { }
192};
193
194static struct methods nv50_methods[] = {
195 { "PRAMIN", load_vbios_pramin, true },
196 { "PROM", load_vbios_prom, false },
197 { "PCIROM", load_vbios_pci, true },
198 { }
199};
200
201static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
202{
203 struct drm_nouveau_private *dev_priv = dev->dev_private;
204 struct methods *methods, *method;
205 int testscore = 3;
206
207 if (nouveau_vbios) {
208 method = nv04_methods;
209 while (method->loadbios) {
210 if (!strcasecmp(nouveau_vbios, method->desc))
211 break;
212 method++;
213 }
214
215 if (method->loadbios) {
216 NV_INFO(dev, "Attempting to use BIOS image from %s\n",
217 method->desc);
218
219 method->loadbios(dev, data);
220 if (score_vbios(dev, data, method->rw))
221 return true;
222 }
223
224 NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
225 }
226
227 if (dev_priv->card_type < NV_50)
228 methods = nv04_methods;
229 else
230 methods = nv50_methods;
231
232 method = methods;
233 while (method->loadbios) {
234 NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
235 method->desc);
236 data[0] = data[1] = 0; /* avoid reuse of previous image */
237 method->loadbios(dev, data);
238 method->score = score_vbios(dev, data, method->rw);
239 if (method->score == testscore)
240 return true;
241 method++;
242 }
243
244 while (--testscore > 0) {
245 method = methods;
246 while (method->loadbios) {
247 if (method->score == testscore) {
248 NV_TRACE(dev, "Using BIOS image from %s\n",
249 method->desc);
250 method->loadbios(dev, data);
251 return true;
252 }
253 method++;
254 }
255 }
256
257 NV_ERROR(dev, "No valid BIOS image found\n");
258 return false;
259}
260
261struct init_tbl_entry {
262 char *name;
263 uint8_t id;
264 int length;
265 int length_offset;
266 int length_multiplier;
267 bool (*handler)(struct nvbios *, uint16_t, struct init_exec *);
268};
269
270struct bit_entry {
271 uint8_t id[2];
272 uint16_t length;
273 uint16_t offset;
274};
275
276static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
277
278#define MACRO_INDEX_SIZE 2
279#define MACRO_SIZE 8
280#define CONDITION_SIZE 12
281#define IO_FLAG_CONDITION_SIZE 9
282#define IO_CONDITION_SIZE 5
283#define MEM_INIT_SIZE 66
284
285static void still_alive(void)
286{
287#if 0
288 sync();
289 msleep(2);
290#endif
291}
292
293static uint32_t
294munge_reg(struct nvbios *bios, uint32_t reg)
295{
296 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
297 struct dcb_entry *dcbent = bios->display.output;
298
299 if (dev_priv->card_type < NV_50)
300 return reg;
301
302 if (reg & 0x40000000) {
303 BUG_ON(!dcbent);
304
305 reg += (ffs(dcbent->or) - 1) * 0x800;
306 if ((reg & 0x20000000) && !(dcbent->sorconf.link & 1))
307 reg += 0x00000080;
308 }
309
310 reg &= ~0x60000000;
311 return reg;
312}
313
314static int
315valid_reg(struct nvbios *bios, uint32_t reg)
316{
317 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
318 struct drm_device *dev = bios->dev;
319
320 /* C51 has misaligned regs on purpose. Marvellous */
321 if (reg & 0x2 || (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51)) {
322 NV_ERROR(dev, "========== misaligned reg 0x%08X ==========\n",
323 reg);
324 return 0;
325 }
326 /*
327 * Warn on C51 regs that have not been verified accessible in
328 * mmiotracing
329 */
330 if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
331 reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
332 NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
333 reg);
334
335 /* Trust the init scripts on G80 */
336 if (dev_priv->card_type >= NV_50)
337 return 1;
338
339 #define WITHIN(x, y, z) ((x >= y) && (x < y + z))
340 if (WITHIN(reg, NV_PMC_OFFSET, NV_PMC_SIZE))
341 return 1;
342 if (WITHIN(reg, NV_PBUS_OFFSET, NV_PBUS_SIZE))
343 return 1;
344 if (WITHIN(reg, NV_PFIFO_OFFSET, NV_PFIFO_SIZE))
345 return 1;
346 if (dev_priv->VBIOS.pub.chip_version >= 0x30 &&
347 (WITHIN(reg, 0x4000, 0x600) || reg == 0x00004600))
348 return 1;
349 if (dev_priv->VBIOS.pub.chip_version >= 0x40 &&
350 WITHIN(reg, 0xc000, 0x48))
351 return 1;
352 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0000d204)
353 return 1;
354 if (dev_priv->VBIOS.pub.chip_version >= 0x40) {
355 if (reg == 0x00011014 || reg == 0x00020328)
356 return 1;
357 if (WITHIN(reg, 0x88000, NV_PBUS_SIZE)) /* new PBUS */
358 return 1;
359 }
360 if (WITHIN(reg, NV_PFB_OFFSET, NV_PFB_SIZE))
361 return 1;
362 if (WITHIN(reg, NV_PEXTDEV_OFFSET, NV_PEXTDEV_SIZE))
363 return 1;
364 if (WITHIN(reg, NV_PCRTC0_OFFSET, NV_PCRTC0_SIZE * 2))
365 return 1;
366 if (WITHIN(reg, NV_PRAMDAC0_OFFSET, NV_PRAMDAC0_SIZE * 2))
367 return 1;
368 if (dev_priv->VBIOS.pub.chip_version >= 0x17 && reg == 0x0070fff0)
369 return 1;
370 if (dev_priv->VBIOS.pub.chip_version == 0x51 &&
371 WITHIN(reg, NV_PRAMIN_OFFSET, NV_PRAMIN_SIZE))
372 return 1;
373 #undef WITHIN
374
375 NV_ERROR(dev, "========== unknown reg 0x%08X ==========\n", reg);
376
377 return 0;
378}
379
380static bool
381valid_idx_port(struct nvbios *bios, uint16_t port)
382{
383 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
384 struct drm_device *dev = bios->dev;
385
386 /*
387 * If adding more ports here, the read/write functions below will need
388 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
389 * used for the port in question
390 */
391 if (dev_priv->card_type < NV_50) {
392 if (port == NV_CIO_CRX__COLOR)
393 return true;
394 if (port == NV_VIO_SRX)
395 return true;
396 } else {
397 if (port == NV_CIO_CRX__COLOR)
398 return true;
399 }
400
401 NV_ERROR(dev, "========== unknown indexed io port 0x%04X ==========\n",
402 port);
403
404 return false;
405}
406
407static bool
408valid_port(struct nvbios *bios, uint16_t port)
409{
410 struct drm_device *dev = bios->dev;
411
412 /*
413 * If adding more ports here, the read/write functions below will need
414 * updating so that the correct mmio range (PRMCIO, PRMDIO, PRMVIO) is
415 * used for the port in question
416 */
417 if (port == NV_VIO_VSE2)
418 return true;
419
420 NV_ERROR(dev, "========== unknown io port 0x%04X ==========\n", port);
421
422 return false;
423}
424
425static uint32_t
426bios_rd32(struct nvbios *bios, uint32_t reg)
427{
428 uint32_t data;
429
430 reg = munge_reg(bios, reg);
431 if (!valid_reg(bios, reg))
432 return 0;
433
434 /*
435 * C51 sometimes uses regs with bit0 set in the address. For these
436 * cases there should exist a translation in a BIOS table to an IO
437 * port address which the BIOS uses for accessing the reg
438 *
439 * These only seem to appear for the power control regs to a flat panel,
440 * and the GPIO regs at 0x60081*. In C51 mmio traces the normal regs
441 * for 0x1308 and 0x1310 are used - hence the mask below. An S3
442 * suspend-resume mmio trace from a C51 will be required to see if this
443 * is true for the power microcode in 0x14.., or whether the direct IO
444 * port access method is needed
445 */
446 if (reg & 0x1)
447 reg &= ~0x1;
448
449 data = nv_rd32(bios->dev, reg);
450
451 BIOSLOG(bios, " Read: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
452
453 return data;
454}
455
456static void
457bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
458{
459 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
460
461 reg = munge_reg(bios, reg);
462 if (!valid_reg(bios, reg))
463 return;
464
465 /* see note in bios_rd32 */
466 if (reg & 0x1)
467 reg &= 0xfffffffe;
468
469 LOG_OLD_VALUE(bios_rd32(bios, reg));
470 BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
471
472 if (dev_priv->VBIOS.execute) {
473 still_alive();
474 nv_wr32(bios->dev, reg, data);
475 }
476}
477
478static uint8_t
479bios_idxprt_rd(struct nvbios *bios, uint16_t port, uint8_t index)
480{
481 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
482 struct drm_device *dev = bios->dev;
483 uint8_t data;
484
485 if (!valid_idx_port(bios, port))
486 return 0;
487
488 if (dev_priv->card_type < NV_50) {
489 if (port == NV_VIO_SRX)
490 data = NVReadVgaSeq(dev, bios->state.crtchead, index);
491 else /* assume NV_CIO_CRX__COLOR */
492 data = NVReadVgaCrtc(dev, bios->state.crtchead, index);
493 } else {
494 uint32_t data32;
495
496 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
497 data = (data32 >> ((index & 3) << 3)) & 0xff;
498 }
499
500 BIOSLOG(bios, " Indexed IO read: Port: 0x%04X, Index: 0x%02X, "
501 "Head: 0x%02X, Data: 0x%02X\n",
502 port, index, bios->state.crtchead, data);
503 return data;
504}
505
506static void
507bios_idxprt_wr(struct nvbios *bios, uint16_t port, uint8_t index, uint8_t data)
508{
509 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
510 struct drm_device *dev = bios->dev;
511
512 if (!valid_idx_port(bios, port))
513 return;
514
515 /*
516 * The current head is maintained in the nvbios member state.crtchead.
517 * We trap changes to CR44 and update the head variable and hence the
518 * register set written.
519 * As CR44 only exists on CRTC0, we update crtchead to head0 in advance
520 * of the write, and to head1 after the write
521 */
522 if (port == NV_CIO_CRX__COLOR && index == NV_CIO_CRE_44 &&
523 data != NV_CIO_CRE_44_HEADB)
524 bios->state.crtchead = 0;
525
526 LOG_OLD_VALUE(bios_idxprt_rd(bios, port, index));
527 BIOSLOG(bios, " Indexed IO write: Port: 0x%04X, Index: 0x%02X, "
528 "Head: 0x%02X, Data: 0x%02X\n",
529 port, index, bios->state.crtchead, data);
530
531 if (bios->execute && dev_priv->card_type < NV_50) {
532 still_alive();
533 if (port == NV_VIO_SRX)
534 NVWriteVgaSeq(dev, bios->state.crtchead, index, data);
535 else /* assume NV_CIO_CRX__COLOR */
536 NVWriteVgaCrtc(dev, bios->state.crtchead, index, data);
537 } else
538 if (bios->execute) {
539 uint32_t data32, shift = (index & 3) << 3;
540
541 still_alive();
542
543 data32 = bios_rd32(bios, NV50_PDISPLAY_VGACRTC(index & ~3));
544 data32 &= ~(0xff << shift);
545 data32 |= (data << shift);
546 bios_wr32(bios, NV50_PDISPLAY_VGACRTC(index & ~3), data32);
547 }
548
549 if (port == NV_CIO_CRX__COLOR &&
550 index == NV_CIO_CRE_44 && data == NV_CIO_CRE_44_HEADB)
551 bios->state.crtchead = 1;
552}
553
554static uint8_t
555bios_port_rd(struct nvbios *bios, uint16_t port)
556{
557 uint8_t data, head = bios->state.crtchead;
558
559 if (!valid_port(bios, port))
560 return 0;
561
562 data = NVReadPRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port);
563
564 BIOSLOG(bios, " IO read: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
565 port, head, data);
566
567 return data;
568}
569
570static void
571bios_port_wr(struct nvbios *bios, uint16_t port, uint8_t data)
572{
573 int head = bios->state.crtchead;
574
575 if (!valid_port(bios, port))
576 return;
577
578 LOG_OLD_VALUE(bios_port_rd(bios, port));
579 BIOSLOG(bios, " IO write: Port: 0x%04X, Head: 0x%02X, Data: 0x%02X\n",
580 port, head, data);
581
582 if (!bios->execute)
583 return;
584
585 still_alive();
586 NVWritePRMVIO(bios->dev, head, NV_PRMVIO0_OFFSET + port, data);
587}
588
589static bool
590io_flag_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
591{
592 /*
593 * The IO flag condition entry has 2 bytes for the CRTC port; 1 byte
594 * for the CRTC index; 1 byte for the mask to apply to the value
595 * retrieved from the CRTC; 1 byte for the shift right to apply to the
596 * masked CRTC value; 2 bytes for the offset to the flag array, to
597 * which the shifted value is added; 1 byte for the mask applied to the
598 * value read from the flag array; and 1 byte for the value to compare
599 * against the masked byte from the flag table.
600 */
601
602 uint16_t condptr = bios->io_flag_condition_tbl_ptr + cond * IO_FLAG_CONDITION_SIZE;
603 uint16_t crtcport = ROM16(bios->data[condptr]);
604 uint8_t crtcindex = bios->data[condptr + 2];
605 uint8_t mask = bios->data[condptr + 3];
606 uint8_t shift = bios->data[condptr + 4];
607 uint16_t flagarray = ROM16(bios->data[condptr + 5]);
608 uint8_t flagarraymask = bios->data[condptr + 7];
609 uint8_t cmpval = bios->data[condptr + 8];
610 uint8_t data;
611
612 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
613 "Shift: 0x%02X, FlagArray: 0x%04X, FAMask: 0x%02X, "
614 "Cmpval: 0x%02X\n",
615 offset, crtcport, crtcindex, mask, shift, flagarray, flagarraymask, cmpval);
616
617 data = bios_idxprt_rd(bios, crtcport, crtcindex);
618
619 data = bios->data[flagarray + ((data & mask) >> shift)];
620 data &= flagarraymask;
621
622 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
623 offset, data, cmpval);
624
625 return (data == cmpval);
626}
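
/*
 * Illustrative sketch, not part of the driver: decoding one 9-byte IO
 * flag condition entry from a little-endian byte stream, mirroring the
 * layout documented above. rom16() and struct io_flag_cond are names
 * invented for this example; the real code applies ROM16() directly to
 * bios->data.
 */
#include <stdint.h>

struct io_flag_cond {
	uint16_t port, flagarray;
	uint8_t index, mask, shift, famask, cmpval;
};

static uint16_t rom16(const uint8_t *p)
{
	return p[0] | (uint16_t)p[1] << 8; /* VBIOS data is little endian */
}

static void decode_io_flag_cond(const uint8_t *e, struct io_flag_cond *c)
{
	c->port      = rom16(&e[0]);
	c->index     = e[2];
	c->mask      = e[3];
	c->shift     = e[4];
	c->flagarray = rom16(&e[5]);
	c->famask    = e[7];
	c->cmpval    = e[8];
}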
627
628static bool
629bios_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
630{
631 /*
632 * The condition table entry has 4 bytes for the address of the
633 * register to check, 4 bytes for a mask to apply to the register and
634 * 4 for a test comparison value
635 */
636
637 uint16_t condptr = bios->condition_tbl_ptr + cond * CONDITION_SIZE;
638 uint32_t reg = ROM32(bios->data[condptr]);
639 uint32_t mask = ROM32(bios->data[condptr + 4]);
640 uint32_t cmpval = ROM32(bios->data[condptr + 8]);
641 uint32_t data;
642
643 BIOSLOG(bios, "0x%04X: Cond: 0x%02X, Reg: 0x%08X, Mask: 0x%08X\n",
644 offset, cond, reg, mask);
645
646 data = bios_rd32(bios, reg) & mask;
647
648 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
649 offset, data, cmpval);
650
651 return (data == cmpval);
652}
653
654static bool
655io_condition_met(struct nvbios *bios, uint16_t offset, uint8_t cond)
656{
657 /*
658 * The IO condition entry has 2 bytes for the IO port address; 1 byte
659 * for the index to write to io_port; 1 byte for the mask to apply to
660 * the byte read from io_port+1; and 1 byte for the value to compare
661 * against the masked byte.
662 */
663
664 uint16_t condptr = bios->io_condition_tbl_ptr + cond * IO_CONDITION_SIZE;
665 uint16_t io_port = ROM16(bios->data[condptr]);
666 uint8_t port_index = bios->data[condptr + 2];
667 uint8_t mask = bios->data[condptr + 3];
668 uint8_t cmpval = bios->data[condptr + 4];
669
670 uint8_t data = bios_idxprt_rd(bios, io_port, port_index) & mask;
671
672 BIOSLOG(bios, "0x%04X: Checking if 0x%02X equals 0x%02X\n",
673 offset, data, cmpval);
674
675 return (data == cmpval);
676}
677
678static int
679nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
680{
681 struct drm_nouveau_private *dev_priv = dev->dev_private;
682 uint32_t reg0 = nv_rd32(dev, reg + 0);
683 uint32_t reg1 = nv_rd32(dev, reg + 4);
684 struct nouveau_pll_vals pll;
685 struct pll_lims pll_limits;
686 int ret;
687
688 ret = get_pll_limits(dev, reg, &pll_limits);
689 if (ret)
690 return ret;
691
692 clk = nouveau_calc_pll_mnp(dev, &pll_limits, clk, &pll);
693 if (!clk)
694 return -ERANGE;
695
696 reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
697 reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
698
699 if (dev_priv->VBIOS.execute) {
700 still_alive();
701 nv_wr32(dev, reg + 4, reg1);
702 nv_wr32(dev, reg + 0, reg0);
703 }
704
705 return 0;
706}
707
708static int
709setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
710{
711 struct drm_device *dev = bios->dev;
712 struct drm_nouveau_private *dev_priv = dev->dev_private;
713 /* clk in kHz */
714 struct pll_lims pll_lim;
715 struct nouveau_pll_vals pllvals;
716 int ret;
717
718 if (dev_priv->card_type >= NV_50)
719 return nv50_pll_set(dev, reg, clk);
720
721	/* high regs (such as in the Mac G5 table) are not adjusted by -4 */
722 ret = get_pll_limits(dev, reg > 0x405c ? reg : reg - 4, &pll_lim);
723 if (ret)
724 return ret;
725
726 clk = nouveau_calc_pll_mnp(dev, &pll_lim, clk, &pllvals);
727 if (!clk)
728 return -ERANGE;
729
730 if (bios->execute) {
731 still_alive();
732 nouveau_hw_setpll(dev, reg, &pllvals);
733 }
734
735 return 0;
736}
737
738static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
739{
740 struct drm_nouveau_private *dev_priv = dev->dev_private;
741 struct nvbios *bios = &dev_priv->VBIOS;
742
743 /*
744 * For the results of this function to be correct, CR44 must have been
745 * set (using bios_idxprt_wr to set crtchead), CR58 set for CR57 = 0,
746 * and the DCB table parsed, before the script calling the function is
747	 * run. run_digital_op_script is an example of how to do such setup
748 */
749
750 uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
751
752	if (dcb_entry >= bios->bdcb.dcb.entries) {
753 NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
754 "(%02X)\n", dcb_entry);
755 dcb_entry = 0x7f; /* unused / invalid marker */
756 }
757
758 return dcb_entry;
759}
760
761static struct nouveau_i2c_chan *
762init_i2c_device_find(struct drm_device *dev, int i2c_index)
763{
764 struct drm_nouveau_private *dev_priv = dev->dev_private;
765 struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
766
767 if (i2c_index == 0xff) {
768 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
769 int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
770 int default_indices = bdcb->i2c_default_indices;
771
772 if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
773 shift = 4;
774
775 i2c_index = (default_indices >> shift) & 0xf;
776 }
777 if (i2c_index == 0x80) /* g80+ */
778 i2c_index = bdcb->i2c_default_indices & 0xf;
779
780 return nouveau_i2c_find(dev, i2c_index);
781}
782
783static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
784{
785 /*
786 * For mlv < 0x80, it is an index into a table of TMDS base addresses.
787 * For mlv == 0x80 use the "or" value of the dcb_entry indexed by
788 * CR58 for CR57 = 0 to index a table of offsets to the basic
789 * 0x6808b0 address.
790 * For mlv == 0x81 use the "or" value of the dcb_entry indexed by
791 * CR58 for CR57 = 0 to index a table of offsets to the basic
792 * 0x6808b0 address, and then flip the offset by 8.
793 */
794
795 struct drm_nouveau_private *dev_priv = dev->dev_private;
796 const int pramdac_offset[13] = {
797 0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
798 const uint32_t pramdac_table[4] = {
799 0x6808b0, 0x6808b8, 0x6828b0, 0x6828b8 };
800
801 if (mlv >= 0x80) {
802 int dcb_entry, dacoffset;
803
804 /* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
805 dcb_entry = dcb_entry_idx_from_crtchead(dev);
806 if (dcb_entry == 0x7f)
807 return 0;
808 dacoffset = pramdac_offset[
809 dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
810 if (mlv == 0x81)
811 dacoffset ^= 8;
812 return 0x6808b0 + dacoffset;
813 } else {
814		if (mlv >= ARRAY_SIZE(pramdac_table)) {
815 NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
816 mlv);
817 return 0;
818 }
819 return pramdac_table[mlv];
820 }
821}
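
/*
 * Illustrative sketch, not part of the driver: the mlv >= 0x80 branch
 * above reduces to the arithmetic below, where dacoffset has already
 * been looked up from the DCB entry's "or" value.
 */
#include <stdint.h>

static uint32_t tmds_reg_for_high_mlv(uint8_t mlv, int dacoffset)
{
	if (mlv == 0x81)
		dacoffset ^= 8; /* flip between the ...b0/...b8 pair */
	return 0x6808b0 + dacoffset;
}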
822
823static bool
824init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
825 struct init_exec *iexec)
826{
827 /*
828 * INIT_IO_RESTRICT_PROG opcode: 0x32 ('2')
829 *
830 * offset (8 bit): opcode
831 * offset + 1 (16 bit): CRTC port
832 * offset + 3 (8 bit): CRTC index
833 * offset + 4 (8 bit): mask
834 * offset + 5 (8 bit): shift
835 * offset + 6 (8 bit): count
836 * offset + 7 (32 bit): register
837 * offset + 11 (32 bit): configuration 1
838 * ...
839 *
840 * Starting at offset + 11 there are "count" 32 bit values.
841 * To find out which value to use read index "CRTC index" on "CRTC
842 * port", AND this value with "mask" and then bit shift right "shift"
843 * bits. Read the appropriate value using this index and write to
844 * "register"
845 */
846
847 uint16_t crtcport = ROM16(bios->data[offset + 1]);
848 uint8_t crtcindex = bios->data[offset + 3];
849 uint8_t mask = bios->data[offset + 4];
850 uint8_t shift = bios->data[offset + 5];
851 uint8_t count = bios->data[offset + 6];
852 uint32_t reg = ROM32(bios->data[offset + 7]);
853 uint8_t config;
854 uint32_t configval;
855
856 if (!iexec->execute)
857 return true;
858
859 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
860 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
861 offset, crtcport, crtcindex, mask, shift, count, reg);
862
863 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
864 if (config > count) {
865 NV_ERROR(bios->dev,
866 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
867 offset, config, count);
868 return false;
869 }
870
871 configval = ROM32(bios->data[offset + 11 + config * 4]);
872
873 BIOSLOG(bios, "0x%04X: Writing config %02X\n", offset, config);
874
875 bios_wr32(bios, reg, configval);
876
877 return true;
878}
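
/*
 * Worked example, not from a real BIOS: with mask 0x0c, shift 2 and a
 * CRTC read of 0x57, the helper below yields (0x57 & 0x0c) >> 2 == 1,
 * so the value written to "register" is the 32-bit word at
 * offset + 11 + 1 * 4.
 */
#include <stdint.h>

static uint8_t restrict_config_index(uint8_t ioval, uint8_t mask, uint8_t shift)
{
	return (ioval & mask) >> shift;
}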
879
880static bool
881init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
882{
883 /*
884 * INIT_REPEAT opcode: 0x33 ('3')
885 *
886 * offset (8 bit): opcode
887 * offset + 1 (8 bit): count
888 *
889 * Execute script following this opcode up to INIT_REPEAT_END
890 * "count" times
891 */
892
893 uint8_t count = bios->data[offset + 1];
894 uint8_t i;
895
896 /* no iexec->execute check by design */
897
898 BIOSLOG(bios, "0x%04X: Repeating following segment %d times\n",
899 offset, count);
900
901 iexec->repeat = true;
902
903 /*
904 * count - 1, as the script block will execute once when we leave this
905 * opcode -- this is compatible with bios behaviour as:
906 * a) the block is always executed at least once, even if count == 0
907 * b) the bios interpreter skips to the op following INIT_END_REPEAT,
908 * while we don't
909 */
910 for (i = 0; i < count - 1; i++)
911 parse_init_table(bios, offset + 2, iexec);
912
913 iexec->repeat = false;
914
915 return true;
916}
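
/*
 * Illustrative sketch, not part of the driver: total executions of the
 * repeated block. The loop above leans on integer promotion -- when
 * count == 0, "count - 1" is the int -1, so the loop body never runs,
 * and the block still executes once as parsing falls through.
 */
#include <stdint.h>

static unsigned int repeat_total_runs(uint8_t count)
{
	return count ? count : 1; /* matches bios behaviour point a) */
}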
917
918static bool
919init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
920 struct init_exec *iexec)
921{
922 /*
923 * INIT_IO_RESTRICT_PLL opcode: 0x34 ('4')
924 *
925 * offset (8 bit): opcode
926 * offset + 1 (16 bit): CRTC port
927 * offset + 3 (8 bit): CRTC index
928 * offset + 4 (8 bit): mask
929 * offset + 5 (8 bit): shift
930 * offset + 6 (8 bit): IO flag condition index
931 * offset + 7 (8 bit): count
932 * offset + 8 (32 bit): register
933 * offset + 12 (16 bit): frequency 1
934 * ...
935 *
936 * Starting at offset + 12 there are "count" 16 bit frequencies (10kHz).
937 * Set PLL register "register" to coefficients for frequency n,
938 * selected by reading index "CRTC index" of "CRTC port" ANDed with
939 * "mask" and shifted right by "shift".
940 *
941 * If "IO flag condition index" > 0, and condition met, double
942 * frequency before setting it.
943 */
944
945 uint16_t crtcport = ROM16(bios->data[offset + 1]);
946 uint8_t crtcindex = bios->data[offset + 3];
947 uint8_t mask = bios->data[offset + 4];
948 uint8_t shift = bios->data[offset + 5];
949 int8_t io_flag_condition_idx = bios->data[offset + 6];
950 uint8_t count = bios->data[offset + 7];
951 uint32_t reg = ROM32(bios->data[offset + 8]);
952 uint8_t config;
953 uint16_t freq;
954
955 if (!iexec->execute)
956 return true;
957
958 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
959 "Shift: 0x%02X, IO Flag Condition: 0x%02X, "
960 "Count: 0x%02X, Reg: 0x%08X\n",
961 offset, crtcport, crtcindex, mask, shift,
962 io_flag_condition_idx, count, reg);
963
964 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
965 if (config > count) {
966 NV_ERROR(bios->dev,
967 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
968 offset, config, count);
969 return false;
970 }
971
972 freq = ROM16(bios->data[offset + 12 + config * 2]);
973
974 if (io_flag_condition_idx > 0) {
975 if (io_flag_condition_met(bios, offset, io_flag_condition_idx)) {
976 BIOSLOG(bios, "0x%04X: Condition fulfilled -- "
977 "frequency doubled\n", offset);
978 freq *= 2;
979 } else
980 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- "
981 "frequency unchanged\n", offset);
982 }
983
984 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %d0kHz\n",
985 offset, reg, config, freq);
986
987 setPLL(bios, reg, freq * 10);
988
989 return true;
990}
991
992static bool
993init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
994{
995 /*
996 * INIT_END_REPEAT opcode: 0x36 ('6')
997 *
998 * offset (8 bit): opcode
999 *
1000 * Marks the end of the block for INIT_REPEAT to repeat
1001 */
1002
1003 /* no iexec->execute check by design */
1004
1005 /*
1006 * iexec->repeat flag necessary to go past INIT_END_REPEAT opcode when
1007 * we're not in repeat mode
1008 */
1009 if (iexec->repeat)
1010 return false;
1011
1012 return true;
1013}
1014
1015static bool
1016init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1017{
1018 /*
1019 * INIT_COPY opcode: 0x37 ('7')
1020 *
1021 * offset (8 bit): opcode
1022 * offset + 1 (32 bit): register
1023 * offset + 5 (8 bit): shift
1024 * offset + 6 (8 bit): srcmask
1025 * offset + 7 (16 bit): CRTC port
1026 * offset + 9 (8 bit): CRTC index
1027 * offset + 10 (8 bit): mask
1028 *
1029 * Read index "CRTC index" on "CRTC port", AND with "mask", OR with
1030 * (REGVAL("register") >> "shift" & "srcmask") and write-back to CRTC
1031 * port
1032 */
1033
1034 uint32_t reg = ROM32(bios->data[offset + 1]);
1035 uint8_t shift = bios->data[offset + 5];
1036 uint8_t srcmask = bios->data[offset + 6];
1037 uint16_t crtcport = ROM16(bios->data[offset + 7]);
1038 uint8_t crtcindex = bios->data[offset + 9];
1039 uint8_t mask = bios->data[offset + 10];
1040 uint32_t data;
1041 uint8_t crtcdata;
1042
1043 if (!iexec->execute)
1044 return true;
1045
1046 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, "
1047 "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n",
1048 offset, reg, shift, srcmask, crtcport, crtcindex, mask);
1049
1050 data = bios_rd32(bios, reg);
1051
1052 if (shift < 0x80)
1053 data >>= shift;
1054 else
1055 data <<= (0x100 - shift);
1056
1057 data &= srcmask;
1058
1059 crtcdata = bios_idxprt_rd(bios, crtcport, crtcindex) & mask;
1060 crtcdata |= (uint8_t)data;
1061 bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata);
1062
1063 return true;
1064}
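
/*
 * Illustrative sketch, not part of the driver: the shift byte encodes a
 * signed shift count -- values below 0x80 shift right, values from 0x80
 * up encode a left shift by (0x100 - shift). The same idiom reappears
 * in init_copy_nv_reg() below.
 */
#include <stdint.h>

static uint32_t bios_signed_shift(uint32_t v, uint8_t shift)
{
	return shift < 0x80 ? v >> shift : v << (0x100 - shift);
}
/* e.g. shift 0xf8 encodes -8, i.e. a left shift by 8 bits */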
1065
1066static bool
1067init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1068{
1069 /*
1070 * INIT_NOT opcode: 0x38 ('8')
1071 *
1072 * offset (8 bit): opcode
1073 *
1074 * Invert the current execute / no-execute condition (i.e. "else")
1075 */
1076 if (iexec->execute)
1077 BIOSLOG(bios, "0x%04X: ------ Skipping following commands ------\n", offset);
1078 else
1079 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset);
1080
1081 iexec->execute = !iexec->execute;
1082 return true;
1083}
1084
1085static bool
1086init_io_flag_condition(struct nvbios *bios, uint16_t offset,
1087 struct init_exec *iexec)
1088{
1089 /*
1090 * INIT_IO_FLAG_CONDITION opcode: 0x39 ('9')
1091 *
1092 * offset (8 bit): opcode
1093 * offset + 1 (8 bit): condition number
1094 *
1095 * Check condition "condition number" in the IO flag condition table.
1096 * If condition not met skip subsequent opcodes until condition is
1097 * inverted (INIT_NOT), or we hit INIT_RESUME
1098 */
1099
1100 uint8_t cond = bios->data[offset + 1];
1101
1102 if (!iexec->execute)
1103 return true;
1104
1105 if (io_flag_condition_met(bios, offset, cond))
1106 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
1107 else {
1108 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
1109 iexec->execute = false;
1110 }
1111
1112 return true;
1113}
1114
1115static bool
1116init_idx_addr_latched(struct nvbios *bios, uint16_t offset,
1117 struct init_exec *iexec)
1118{
1119 /*
1120 * INIT_INDEX_ADDRESS_LATCHED opcode: 0x49 ('I')
1121 *
1122 * offset (8 bit): opcode
1123 * offset + 1 (32 bit): control register
1124 * offset + 5 (32 bit): data register
1125 * offset + 9 (32 bit): mask
1126 * offset + 13 (32 bit): data
1127 * offset + 17 (8 bit): count
1128 * offset + 18 (8 bit): address 1
1129 * offset + 19 (8 bit): data 1
1130 * ...
1131 *
1132 * For each of "count" address and data pairs, write "data n" to
1133 * "data register", read the current value of "control register",
1134 * and write it back once ANDed with "mask", ORed with "data",
1135 * and ORed with "address n"
1136 */
1137
1138 uint32_t controlreg = ROM32(bios->data[offset + 1]);
1139 uint32_t datareg = ROM32(bios->data[offset + 5]);
1140 uint32_t mask = ROM32(bios->data[offset + 9]);
1141 uint32_t data = ROM32(bios->data[offset + 13]);
1142 uint8_t count = bios->data[offset + 17];
1143 uint32_t value;
1144 int i;
1145
1146 if (!iexec->execute)
1147 return true;
1148
1149 BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, "
1150 "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n",
1151 offset, controlreg, datareg, mask, data, count);
1152
1153 for (i = 0; i < count; i++) {
1154 uint8_t instaddress = bios->data[offset + 18 + i * 2];
1155 uint8_t instdata = bios->data[offset + 19 + i * 2];
1156
1157 BIOSLOG(bios, "0x%04X: Address: 0x%02X, Data: 0x%02X\n",
1158 offset, instaddress, instdata);
1159
1160 bios_wr32(bios, datareg, instdata);
1161 value = bios_rd32(bios, controlreg) & mask;
1162 value |= data;
1163 value |= instaddress;
1164 bios_wr32(bios, controlreg, value);
1165 }
1166
1167 return true;
1168}
1169
1170static bool
1171init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
1172 struct init_exec *iexec)
1173{
1174 /*
1175 * INIT_IO_RESTRICT_PLL2 opcode: 0x4A ('J')
1176 *
1177 * offset (8 bit): opcode
1178 * offset + 1 (16 bit): CRTC port
1179 * offset + 3 (8 bit): CRTC index
1180 * offset + 4 (8 bit): mask
1181 * offset + 5 (8 bit): shift
1182 * offset + 6 (8 bit): count
1183 * offset + 7 (32 bit): register
1184 * offset + 11 (32 bit): frequency 1
1185 * ...
1186 *
1187 * Starting at offset + 11 there are "count" 32 bit frequencies (kHz).
1188 * Set PLL register "register" to coefficients for frequency n,
1189 * selected by reading index "CRTC index" of "CRTC port" ANDed with
1190 * "mask" and shifted right by "shift".
1191 */
1192
1193 uint16_t crtcport = ROM16(bios->data[offset + 1]);
1194 uint8_t crtcindex = bios->data[offset + 3];
1195 uint8_t mask = bios->data[offset + 4];
1196 uint8_t shift = bios->data[offset + 5];
1197 uint8_t count = bios->data[offset + 6];
1198 uint32_t reg = ROM32(bios->data[offset + 7]);
1199 uint8_t config;
1200 uint32_t freq;
1201
1202 if (!iexec->execute)
1203 return true;
1204
1205 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
1206 "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n",
1207 offset, crtcport, crtcindex, mask, shift, count, reg);
1208
1209 if (!reg)
1210 return true;
1211
1212 config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift;
1213 if (config > count) {
1214 NV_ERROR(bios->dev,
1215 "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
1216 offset, config, count);
1217 return false;
1218 }
1219
1220 freq = ROM32(bios->data[offset + 11 + config * 4]);
1221
1222 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Config: 0x%02X, Freq: %dkHz\n",
1223 offset, reg, config, freq);
1224
1225 setPLL(bios, reg, freq);
1226
1227 return true;
1228}
1229
1230static bool
1231init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1232{
1233 /*
1234 * INIT_PLL2 opcode: 0x4B ('K')
1235 *
1236 * offset (8 bit): opcode
1237 * offset + 1 (32 bit): register
1238 * offset + 5 (32 bit): freq
1239 *
1240 * Set PLL register "register" to coefficients for frequency "freq"
1241 */
1242
1243 uint32_t reg = ROM32(bios->data[offset + 1]);
1244 uint32_t freq = ROM32(bios->data[offset + 5]);
1245
1246 if (!iexec->execute)
1247 return true;
1248
1249 BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n",
1250 offset, reg, freq);
1251
1252 setPLL(bios, reg, freq);
1253 return true;
1254}
1255
1256static bool
1257init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1258{
1259 /*
1260 * INIT_I2C_BYTE opcode: 0x4C ('L')
1261 *
1262 * offset (8 bit): opcode
1263 * offset + 1 (8 bit): DCB I2C table entry index
1264 * offset + 2 (8 bit): I2C slave address
1265 * offset + 3 (8 bit): count
1266 * offset + 4 (8 bit): I2C register 1
1267 * offset + 5 (8 bit): mask 1
1268 * offset + 6 (8 bit): data 1
1269 * ...
1270 *
1271 * For each of "count" registers given by "I2C register n" on the device
1272 * addressed by "I2C slave address" on the I2C bus given by
1273 * "DCB I2C table entry index", read the register, AND the result with
1274 * "mask n" and OR it with "data n" before writing it back to the device
1275 */
1276
1277 uint8_t i2c_index = bios->data[offset + 1];
1278 uint8_t i2c_address = bios->data[offset + 2];
1279 uint8_t count = bios->data[offset + 3];
1280 struct nouveau_i2c_chan *chan;
1281 struct i2c_msg msg;
1282 int i;
1283
1284 if (!iexec->execute)
1285 return true;
1286
1287 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1288 "Count: 0x%02X\n",
1289 offset, i2c_index, i2c_address, count);
1290
1291 chan = init_i2c_device_find(bios->dev, i2c_index);
1292 if (!chan)
1293 return false;
1294
1295 for (i = 0; i < count; i++) {
1296 uint8_t i2c_reg = bios->data[offset + 4 + i * 3];
1297 uint8_t mask = bios->data[offset + 5 + i * 3];
1298 uint8_t data = bios->data[offset + 6 + i * 3];
1299 uint8_t value;
1300
1301 msg.addr = i2c_address;
1302 msg.flags = I2C_M_RD;
1303 msg.len = 1;
1304 msg.buf = &value;
1305 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1306 return false;
1307
1308 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
1309 "Mask: 0x%02X, Data: 0x%02X\n",
1310 offset, i2c_reg, value, mask, data);
1311
1312 value = (value & mask) | data;
1313
1314 if (bios->execute) {
1315 msg.addr = i2c_address;
1316 msg.flags = 0;
1317 msg.len = 1;
1318 msg.buf = &value;
1319 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1320 return false;
1321 }
1322 }
1323
1324 return true;
1325}
1326
1327static bool
1328init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1329{
1330 /*
1331 * INIT_ZM_I2C_BYTE opcode: 0x4D ('M')
1332 *
1333 * offset (8 bit): opcode
1334 * offset + 1 (8 bit): DCB I2C table entry index
1335 * offset + 2 (8 bit): I2C slave address
1336 * offset + 3 (8 bit): count
1337 * offset + 4 (8 bit): I2C register 1
1338 * offset + 5 (8 bit): data 1
1339 * ...
1340 *
1341 * For each of "count" registers given by "I2C register n" on the device
1342 * addressed by "I2C slave address" on the I2C bus given by
1343 * "DCB I2C table entry index", set the register to "data n"
1344 */
1345
1346 uint8_t i2c_index = bios->data[offset + 1];
1347 uint8_t i2c_address = bios->data[offset + 2];
1348 uint8_t count = bios->data[offset + 3];
1349 struct nouveau_i2c_chan *chan;
1350 struct i2c_msg msg;
1351 int i;
1352
1353 if (!iexec->execute)
1354 return true;
1355
1356 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1357 "Count: 0x%02X\n",
1358 offset, i2c_index, i2c_address, count);
1359
1360 chan = init_i2c_device_find(bios->dev, i2c_index);
1361 if (!chan)
1362 return false;
1363
1364 for (i = 0; i < count; i++) {
1365 uint8_t i2c_reg = bios->data[offset + 4 + i * 2];
1366 uint8_t data = bios->data[offset + 5 + i * 2];
1367
1368 BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Data: 0x%02X\n",
1369 offset, i2c_reg, data);
1370
1371 if (bios->execute) {
1372 msg.addr = i2c_address;
1373 msg.flags = 0;
1374 msg.len = 1;
1375 msg.buf = &data;
1376 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1377 return false;
1378 }
1379 }
1380
1381 return true;
1382}
1383
1384static bool
1385init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1386{
1387 /*
1388 * INIT_ZM_I2C opcode: 0x4E ('N')
1389 *
1390 * offset (8 bit): opcode
1391 * offset + 1 (8 bit): DCB I2C table entry index
1392 * offset + 2 (8 bit): I2C slave address
1393 * offset + 3 (8 bit): count
1394 * offset + 4 (8 bit): data 1
1395 * ...
1396 *
1397 * Send "count" bytes ("data n") to the device addressed by "I2C slave
1398 * address" on the I2C bus given by "DCB I2C table entry index"
1399 */
1400
1401 uint8_t i2c_index = bios->data[offset + 1];
1402 uint8_t i2c_address = bios->data[offset + 2];
1403 uint8_t count = bios->data[offset + 3];
1404 struct nouveau_i2c_chan *chan;
1405 struct i2c_msg msg;
1406 uint8_t data[256];
1407 int i;
1408
1409 if (!iexec->execute)
1410 return true;
1411
1412 BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, "
1413 "Count: 0x%02X\n",
1414 offset, i2c_index, i2c_address, count);
1415
1416 chan = init_i2c_device_find(bios->dev, i2c_index);
1417 if (!chan)
1418 return false;
1419
1420 for (i = 0; i < count; i++) {
1421 data[i] = bios->data[offset + 4 + i];
1422
1423 BIOSLOG(bios, "0x%04X: Data: 0x%02X\n", offset, data[i]);
1424 }
1425
1426 if (bios->execute) {
1427 msg.addr = i2c_address;
1428 msg.flags = 0;
1429 msg.len = count;
1430 msg.buf = data;
1431 if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
1432 return false;
1433 }
1434
1435 return true;
1436}
1437
1438static bool
1439init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1440{
1441 /*
1442 * INIT_TMDS opcode: 0x4F ('O') (non-canon name)
1443 *
1444 * offset (8 bit): opcode
1445 * offset + 1 (8 bit): magic lookup value
1446 * offset + 2 (8 bit): TMDS address
1447 * offset + 3 (8 bit): mask
1448 * offset + 4 (8 bit): data
1449 *
1450 * Read the data reg for TMDS address "TMDS address", AND it with mask
1451 * and OR it with data, then write it back
1452 * "magic lookup value" determines which TMDS base address register is
1453 * used -- see get_tmds_index_reg()
1454 */
1455
1456 uint8_t mlv = bios->data[offset + 1];
1457 uint32_t tmdsaddr = bios->data[offset + 2];
1458 uint8_t mask = bios->data[offset + 3];
1459 uint8_t data = bios->data[offset + 4];
1460 uint32_t reg, value;
1461
1462 if (!iexec->execute)
1463 return true;
1464
1465 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, "
1466 "Mask: 0x%02X, Data: 0x%02X\n",
1467 offset, mlv, tmdsaddr, mask, data);
1468
1469 reg = get_tmds_index_reg(bios->dev, mlv);
1470 if (!reg)
1471 return false;
1472
1473 bios_wr32(bios, reg,
1474 tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
1475 value = (bios_rd32(bios, reg + 4) & mask) | data;
1476 bios_wr32(bios, reg + 4, value);
1477 bios_wr32(bios, reg, tmdsaddr);
1478
1479 return true;
1480}
1481
1482static bool
1483init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
1484 struct init_exec *iexec)
1485{
1486 /*
1487 * INIT_ZM_TMDS_GROUP opcode: 0x50 ('P') (non-canon name)
1488 *
1489 * offset (8 bit): opcode
1490 * offset + 1 (8 bit): magic lookup value
1491 * offset + 2 (8 bit): count
1492 * offset + 3 (8 bit): addr 1
1493 * offset + 4 (8 bit): data 1
1494 * ...
1495 *
1496 * For each of "count" TMDS address and data pairs write "data n" to
1497 * "addr n". "magic lookup value" determines which TMDS base address
1498 * register is used -- see get_tmds_index_reg()
1499 */
1500
1501 uint8_t mlv = bios->data[offset + 1];
1502 uint8_t count = bios->data[offset + 2];
1503 uint32_t reg;
1504 int i;
1505
1506 if (!iexec->execute)
1507 return true;
1508
1509 BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n",
1510 offset, mlv, count);
1511
1512 reg = get_tmds_index_reg(bios->dev, mlv);
1513 if (!reg)
1514 return false;
1515
1516 for (i = 0; i < count; i++) {
1517 uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
1518 uint8_t tmdsdata = bios->data[offset + 4 + i * 2];
1519
1520 bios_wr32(bios, reg + 4, tmdsdata);
1521 bios_wr32(bios, reg, tmdsaddr);
1522 }
1523
1524 return true;
1525}
1526
1527static bool
1528init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset,
1529 struct init_exec *iexec)
1530{
1531 /*
1532 * INIT_CR_INDEX_ADDRESS_LATCHED opcode: 0x51 ('Q')
1533 *
1534 * offset (8 bit): opcode
1535 * offset + 1 (8 bit): CRTC index1
1536 * offset + 2 (8 bit): CRTC index2
1537 * offset + 3 (8 bit): baseaddr
1538 * offset + 4 (8 bit): count
1539 * offset + 5 (8 bit): data 1
1540 * ...
1541 *
1542 * For each of "count" address and data pairs, write "baseaddr + n" to
1543 * "CRTC index1" and "data n" to "CRTC index2"
1544 * Once complete, restore initial value read from "CRTC index1"
1545 */
1546 uint8_t crtcindex1 = bios->data[offset + 1];
1547 uint8_t crtcindex2 = bios->data[offset + 2];
1548 uint8_t baseaddr = bios->data[offset + 3];
1549 uint8_t count = bios->data[offset + 4];
1550 uint8_t oldaddr, data;
1551 int i;
1552
1553 if (!iexec->execute)
1554 return true;
1555
1556 BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, "
1557 "BaseAddr: 0x%02X, Count: 0x%02X\n",
1558 offset, crtcindex1, crtcindex2, baseaddr, count);
1559
1560 oldaddr = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex1);
1561
1562 for (i = 0; i < count; i++) {
1563 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1,
1564 baseaddr + i);
1565 data = bios->data[offset + 5 + i];
1566 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex2, data);
1567 }
1568
1569 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr);
1570
1571 return true;
1572}
1573
1574static bool
1575init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1576{
1577 /*
1578 * INIT_CR opcode: 0x52 ('R')
1579 *
1580 * offset (8 bit): opcode
1581 * offset + 1 (8 bit): CRTC index
1582 * offset + 2 (8 bit): mask
1583 * offset + 3 (8 bit): data
1584 *
1585	 * Assign the value at "CRTC index", ANDed with "mask" and ORed with
1586	 * "data", back to "CRTC index"
1587 */
1588
1589 uint8_t crtcindex = bios->data[offset + 1];
1590 uint8_t mask = bios->data[offset + 2];
1591 uint8_t data = bios->data[offset + 3];
1592 uint8_t value;
1593
1594 if (!iexec->execute)
1595 return true;
1596
1597 BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n",
1598 offset, crtcindex, mask, data);
1599
1600 value = bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, crtcindex) & mask;
1601 value |= data;
1602 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value);
1603
1604 return true;
1605}
1606
1607static bool
1608init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1609{
1610 /*
1611 * INIT_ZM_CR opcode: 0x53 ('S')
1612 *
1613 * offset (8 bit): opcode
1614 * offset + 1 (8 bit): CRTC index
1615 * offset + 2 (8 bit): value
1616 *
1617 * Assign "value" to CRTC register with index "CRTC index".
1618 */
1619
1620	uint8_t crtcindex = bios->data[offset + 1];
1621 uint8_t data = bios->data[offset + 2];
1622
1623 if (!iexec->execute)
1624 return true;
1625
1626 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data);
1627
1628 return true;
1629}
1630
1631static bool
1632init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1633{
1634 /*
1635 * INIT_ZM_CR_GROUP opcode: 0x54 ('T')
1636 *
1637 * offset (8 bit): opcode
1638 * offset + 1 (8 bit): count
1639 * offset + 2 (8 bit): CRTC index 1
1640 * offset + 3 (8 bit): value 1
1641 * ...
1642 *
1643 * For "count", assign "value n" to CRTC register with index
1644 * "CRTC index n".
1645 */
1646
1647 uint8_t count = bios->data[offset + 1];
1648 int i;
1649
1650 if (!iexec->execute)
1651 return true;
1652
1653 for (i = 0; i < count; i++)
1654 init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec);
1655
1656 return true;
1657}
1658
1659static bool
1660init_condition_time(struct nvbios *bios, uint16_t offset,
1661 struct init_exec *iexec)
1662{
1663 /*
1664 * INIT_CONDITION_TIME opcode: 0x56 ('V')
1665 *
1666 * offset (8 bit): opcode
1667 * offset + 1 (8 bit): condition number
1668 * offset + 2 (8 bit): retries / 50
1669 *
1670 * Check condition "condition number" in the condition table.
1671 * Bios code then sleeps for 2ms if the condition is not met, and
1672 * repeats up to "retries" times, but on one C51 this has proved
1673	 * insufficient. In mmio traces the driver sleeps for 20ms, so we do
1674 * this, and bail after "retries" times, or 2s, whichever is less.
1675 * If still not met after retries, clear execution flag for this table.
1676 */
1677
1678 uint8_t cond = bios->data[offset + 1];
1679 uint16_t retries = bios->data[offset + 2] * 50;
1680 unsigned cnt;
1681
1682 if (!iexec->execute)
1683 return true;
1684
1685 if (retries > 100)
1686 retries = 100;
1687
1688 BIOSLOG(bios, "0x%04X: Condition: 0x%02X, Retries: 0x%02X\n",
1689 offset, cond, retries);
1690
1691 if (!bios->execute) /* avoid 2s delays when "faking" execution */
1692 retries = 1;
1693
1694 for (cnt = 0; cnt < retries; cnt++) {
1695 if (bios_condition_met(bios, offset, cond)) {
1696 BIOSLOG(bios, "0x%04X: Condition met, continuing\n",
1697 offset);
1698 break;
1699 } else {
1700 BIOSLOG(bios, "0x%04X: "
1701 "Condition not met, sleeping for 20ms\n",
1702 offset);
1703 msleep(20);
1704 }
1705 }
1706
1707 if (!bios_condition_met(bios, offset, cond)) {
1708 NV_WARN(bios->dev,
1709 "0x%04X: Condition still not met after %dms, "
1710 "skipping following opcodes\n", offset, 20 * retries);
1711 iexec->execute = false;
1712 }
1713
1714 return true;
1715}
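
/*
 * Illustrative sketch, not part of the driver: the worst-case wait of
 * INIT_CONDITION_TIME as implemented above. The raw byte scales by 50,
 * is capped at 100 retries, and each retry sleeps 20ms, so the total
 * never exceeds 2000ms (any raw value >= 2 hits the 2s cap).
 */
#include <stdint.h>

static unsigned int condition_time_budget_ms(uint8_t raw)
{
	unsigned int retries = raw * 50;

	if (retries > 100)
		retries = 100;
	return retries * 20;
}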
1716
1717static bool
1718init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
1719 struct init_exec *iexec)
1720{
1721 /*
1722 * INIT_ZM_REG_SEQUENCE opcode: 0x58 ('X')
1723 *
1724 * offset (8 bit): opcode
1725 * offset + 1 (32 bit): base register
1726 * offset + 5 (8 bit): count
1727 * offset + 6 (32 bit): value 1
1728 * ...
1729 *
1730 * Starting at offset + 6 there are "count" 32 bit values.
1731 * For "count" iterations set "base register" + 4 * current_iteration
1732 * to "value current_iteration"
1733 */
1734
1735 uint32_t basereg = ROM32(bios->data[offset + 1]);
1736 uint32_t count = bios->data[offset + 5];
1737 int i;
1738
1739 if (!iexec->execute)
1740 return true;
1741
1742 BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n",
1743 offset, basereg, count);
1744
1745 for (i = 0; i < count; i++) {
1746 uint32_t reg = basereg + i * 4;
1747 uint32_t data = ROM32(bios->data[offset + 6 + i * 4]);
1748
1749 bios_wr32(bios, reg, data);
1750 }
1751
1752 return true;
1753}
1754
1755static bool
1756init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1757{
1758 /*
1759 * INIT_SUB_DIRECT opcode: 0x5B ('[')
1760 *
1761 * offset (8 bit): opcode
1762 * offset + 1 (16 bit): subroutine offset (in bios)
1763 *
1764 * Calls a subroutine that will execute commands until INIT_DONE
1765 * is found.
1766 */
1767
1768 uint16_t sub_offset = ROM16(bios->data[offset + 1]);
1769
1770 if (!iexec->execute)
1771 return true;
1772
1773 BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n",
1774 offset, sub_offset);
1775
1776 parse_init_table(bios, sub_offset, iexec);
1777
1778 BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset);
1779
1780 return true;
1781}
1782
1783static bool
1784init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1785{
1786 /*
1787 * INIT_COPY_NV_REG opcode: 0x5F ('_')
1788 *
1789 * offset (8 bit): opcode
1790 * offset + 1 (32 bit): src reg
1791 * offset + 5 (8 bit): shift
1792 * offset + 6 (32 bit): src mask
1793 * offset + 10 (32 bit): xor
1794 * offset + 14 (32 bit): dst reg
1795 * offset + 18 (32 bit): dst mask
1796 *
1797 * Shift REGVAL("src reg") right by (signed) "shift", AND result with
1798 * "src mask", then XOR with "xor". Write this OR'd with
1799 * (REGVAL("dst reg") AND'd with "dst mask") to "dst reg"
1800 */
1801
1802	uint32_t srcreg = ROM32(bios->data[offset + 1]);
1803	uint8_t shift = bios->data[offset + 5];
1804	uint32_t srcmask = ROM32(bios->data[offset + 6]);
1805	uint32_t xor = ROM32(bios->data[offset + 10]);
1806	uint32_t dstreg = ROM32(bios->data[offset + 14]);
1807	uint32_t dstmask = ROM32(bios->data[offset + 18]);
1808 uint32_t srcvalue, dstvalue;
1809
1810 if (!iexec->execute)
1811 return true;
1812
1813 BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, "
1814 "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n",
1815 offset, srcreg, shift, srcmask, xor, dstreg, dstmask);
1816
1817 srcvalue = bios_rd32(bios, srcreg);
1818
1819 if (shift < 0x80)
1820 srcvalue >>= shift;
1821 else
1822 srcvalue <<= (0x100 - shift);
1823
1824 srcvalue = (srcvalue & srcmask) ^ xor;
1825
1826 dstvalue = bios_rd32(bios, dstreg) & dstmask;
1827
1828 bios_wr32(bios, dstreg, dstvalue | srcvalue);
1829
1830 return true;
1831}
1832
1833static bool
1834init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1835{
1836 /*
1837 * INIT_ZM_INDEX_IO opcode: 0x62 ('b')
1838 *
1839 * offset (8 bit): opcode
1840 * offset + 1 (16 bit): CRTC port
1841 * offset + 3 (8 bit): CRTC index
1842 * offset + 4 (8 bit): data
1843 *
1844 * Write "data" to index "CRTC index" of "CRTC port"
1845 */
1846 uint16_t crtcport = ROM16(bios->data[offset + 1]);
1847 uint8_t crtcindex = bios->data[offset + 3];
1848 uint8_t data = bios->data[offset + 4];
1849
1850 if (!iexec->execute)
1851 return true;
1852
1853 bios_idxprt_wr(bios, crtcport, crtcindex, data);
1854
1855 return true;
1856}
1857
1858static bool
1859init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1860{
1861 /*
1862 * INIT_COMPUTE_MEM opcode: 0x63 ('c')
1863 *
1864 * offset (8 bit): opcode
1865 *
1866 * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
1867 * that the hardware can correctly calculate how much VRAM it has
1868 * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
1869 *
1870 * The implementation of this opcode in general consists of two parts:
1871 * 1) determination of the memory bus width
1872 * 2) determination of how many of the card's RAM pads have ICs attached
1873 *
1874 * 1) is done by a cunning combination of writes to offsets 0x1c and
1875 * 0x3c in the framebuffer, and seeing whether the written values are
1876 * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
1877 *
1878 * 2) is done by a cunning combination of writes to an offset slightly
1879 * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
1880 * if the test pattern can be read back. This then affects bits 12-15 of
1881 * NV_PFB_CFG0
1882 *
1883 * In this context a "cunning combination" may include multiple reads
1884 * and writes to varying locations, often alternating the test pattern
1885 * and 0, doubtless to make sure buffers are filled, residual charges
1886 * on tracks are removed etc.
1887 *
1888 * Unfortunately, the "cunning combination"s mentioned above, and the
1889 * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
1890 * trace I have.
1891 *
1892 * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
1893 * we started was correct, and use that instead
1894 */
1895
1896 /* no iexec->execute check by design */
1897
1898 /*
1899 * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
1900 * and kmmio traces of the binary driver POSTing the card show nothing
1901	 * being done for this opcode. Why is it still listed in the table?!
1902 */
1903
1904 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
1905
1906 if (dev_priv->card_type >= NV_50)
1907 return true;
1908
1909 /*
1910 * On every card I've seen, this step gets done for us earlier in
1911 * the init scripts
1912 uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
1913 bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
1914 */
1915
1916 /*
1917 * This also has probably been done in the scripts, but an mmio trace of
1918 * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
1919 */
1920 bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
1921
1922 /* write back the saved configuration value */
1923 bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
1924
1925 return true;
1926}
1927
1928static bool
1929init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
1930{
1931 /*
1932 * INIT_RESET opcode: 0x65 ('e')
1933 *
1934 * offset (8 bit): opcode
1935 * offset + 1 (32 bit): register
1936 * offset + 5 (32 bit): value1
1937 * offset + 9 (32 bit): value2
1938 *
1939 * Assign "value1" to "register", then assign "value2" to "register"
1940 */
1941
1942 uint32_t reg = ROM32(bios->data[offset + 1]);
1943 uint32_t value1 = ROM32(bios->data[offset + 5]);
1944 uint32_t value2 = ROM32(bios->data[offset + 9]);
1945 uint32_t pci_nv_19, pci_nv_20;
1946
1947 /* no iexec->execute check by design */
1948
1949 pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
1950 bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
1951 bios_wr32(bios, reg, value1);
1952
1953 udelay(10);
1954
1955 bios_wr32(bios, reg, value2);
1956 bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19);
1957
1958 pci_nv_20 = bios_rd32(bios, NV_PBUS_PCI_NV_20);
1959 pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */
1960 bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20);
1961
1962 return true;
1963}
1964
1965static bool
1966init_configure_mem(struct nvbios *bios, uint16_t offset,
1967 struct init_exec *iexec)
1968{
1969 /*
1970 * INIT_CONFIGURE_MEM opcode: 0x66 ('f')
1971 *
1972 * offset (8 bit): opcode
1973 *
1974 * Equivalent to INIT_DONE on bios version 3 or greater.
1975 * For early bios versions, sets up the memory registers, using values
1976 * taken from the memory init table
1977 */
1978
1979 /* no iexec->execute check by design */
1980
1981 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
1982 uint16_t seqtbloffs = bios->legacy.sdr_seq_tbl_ptr, meminitdata = meminitoffs + 6;
1983 uint32_t reg, data;
1984
1985 if (bios->major_version > 2)
1986 return false;
1987
1988 bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
1989 bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
1990
1991 if (bios->data[meminitoffs] & 1)
1992 seqtbloffs = bios->legacy.ddr_seq_tbl_ptr;
1993
1994 for (reg = ROM32(bios->data[seqtbloffs]);
1995 reg != 0xffffffff;
1996 reg = ROM32(bios->data[seqtbloffs += 4])) {
1997
1998 switch (reg) {
1999 case NV_PFB_PRE:
2000 data = NV_PFB_PRE_CMD_PRECHARGE;
2001 break;
2002 case NV_PFB_PAD:
2003 data = NV_PFB_PAD_CKE_NORMAL;
2004 break;
2005 case NV_PFB_REF:
2006 data = NV_PFB_REF_CMD_REFRESH;
2007 break;
2008 default:
2009 data = ROM32(bios->data[meminitdata]);
2010 meminitdata += 4;
2011 if (data == 0xffffffff)
2012 continue;
2013 }
2014
2015 bios_wr32(bios, reg, data);
2016 }
2017
2018 return true;
2019}
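
/*
 * Illustrative sketch, not part of the driver: the loop above walks a
 * 0xffffffff-terminated list of register addresses, pairing each with
 * either a fixed command value or the next word of a separate data
 * stream. A minimal standalone version of the sentinel walk (ignoring
 * the three special-cased PFB registers, and with rom32() standing in
 * for ROM32()):
 */
#include <stdint.h>

static uint32_t rom32(const uint8_t *p)
{
	return p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void walk_seq_table(const uint8_t *seq, const uint8_t *data,
			   void (*wr32)(uint32_t reg, uint32_t val))
{
	uint32_t reg;

	for (reg = rom32(seq); reg != 0xffffffff; reg = rom32(seq += 4)) {
		uint32_t val = rom32(data);

		data += 4;
		if (val != 0xffffffff) /* 0xffffffff data words are skipped */
			wr32(reg, val);
	}
}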
2020
2021static bool
2022init_configure_clk(struct nvbios *bios, uint16_t offset,
2023 struct init_exec *iexec)
2024{
2025 /*
2026 * INIT_CONFIGURE_CLK opcode: 0x67 ('g')
2027 *
2028 * offset (8 bit): opcode
2029 *
2030 * Equivalent to INIT_DONE on bios version 3 or greater.
2031 * For early bios versions, sets up the NVClk and MClk PLLs, using
2032 * values taken from the memory init table
2033 */
2034
2035 /* no iexec->execute check by design */
2036
2037 uint16_t meminitoffs = bios->legacy.mem_init_tbl_ptr + MEM_INIT_SIZE * (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_SCRATCH4__INDEX) >> 4);
2038 int clock;
2039
2040 if (bios->major_version > 2)
2041 return false;
2042
2043 clock = ROM16(bios->data[meminitoffs + 4]) * 10;
2044 setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
2045
2046 clock = ROM16(bios->data[meminitoffs + 2]) * 10;
2047 if (bios->data[meminitoffs] & 1) /* DDR */
2048 clock *= 2;
2049 setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock);
2050
2051 return true;
2052}
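
/*
 * Illustrative sketch, not part of the driver: the memory init table
 * stores clocks as 16-bit little-endian values in 10kHz units, and a
 * DDR configuration doubles the memory clock, as tested above.
 */
#include <stdint.h>

static int meminit_mclk_khz(const uint8_t *meminit)
{
	int clock = (meminit[2] | meminit[3] << 8) * 10; /* 10kHz -> kHz */

	if (meminit[0] & 1) /* DDR flag */
		clock *= 2;
	return clock;
}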
2053
2054static bool
2055init_configure_preinit(struct nvbios *bios, uint16_t offset,
2056 struct init_exec *iexec)
2057{
2058 /*
2059 * INIT_CONFIGURE_PREINIT opcode: 0x68 ('h')
2060 *
2061 * offset (8 bit): opcode
2062 *
2063 * Equivalent to INIT_DONE on bios version 3 or greater.
2064 * For early bios versions, does early init, loading ram and crystal
2065 * configuration from straps into CR3C
2066 */
2067
2068 /* no iexec->execute check by design */
2069
2070 uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
2071 uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
2072
2073 if (bios->major_version > 2)
2074 return false;
2075
2076 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
2077 NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
2078
2079 return true;
2080}
2081
2082static bool
2083init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2084{
2085 /*
2086 * INIT_IO opcode: 0x69 ('i')
2087 *
2088 * offset (8 bit): opcode
2089 * offset + 1 (16 bit): CRTC port
2090 * offset + 3 (8 bit): mask
2091 * offset + 4 (8 bit): data
2092 *
2093 * Assign ((IOVAL("crtc port") & "mask") | "data") to "crtc port"
2094 */
2095
2096 struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
2097 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2098 uint8_t mask = bios->data[offset + 3];
2099 uint8_t data = bios->data[offset + 4];
2100
2101 if (!iexec->execute)
2102 return true;
2103
2104 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n",
2105 offset, crtcport, mask, data);
2106
2107 /*
2108 * I have no idea what this does, but NVIDIA do this magic sequence
2109	 * in the places where this INIT_IO happens.
2110 */
2111 if (dev_priv->card_type >= NV_50 && crtcport == 0x3c3 && data == 1) {
2112 int i;
2113
2114 bios_wr32(bios, 0x614100, (bios_rd32(
2115 bios, 0x614100) & 0x0fffffff) | 0x00800000);
2116
2117 bios_wr32(bios, 0x00e18c, bios_rd32(
2118 bios, 0x00e18c) | 0x00020000);
2119
2120 bios_wr32(bios, 0x614900, (bios_rd32(
2121 bios, 0x614900) & 0x0fffffff) | 0x00800000);
2122
2123 bios_wr32(bios, 0x000200, bios_rd32(
2124 bios, 0x000200) & ~0x40000000);
2125
2126 mdelay(10);
2127
2128 bios_wr32(bios, 0x00e18c, bios_rd32(
2129 bios, 0x00e18c) & ~0x00020000);
2130
2131 bios_wr32(bios, 0x000200, bios_rd32(
2132 bios, 0x000200) | 0x40000000);
2133
2134 bios_wr32(bios, 0x614100, 0x00800018);
2135 bios_wr32(bios, 0x614900, 0x00800018);
2136
2137 mdelay(10);
2138
2139 bios_wr32(bios, 0x614100, 0x10000018);
2140 bios_wr32(bios, 0x614900, 0x10000018);
2141
2142 for (i = 0; i < 3; i++)
2143 bios_wr32(bios, 0x614280 + (i*0x800), bios_rd32(
2144 bios, 0x614280 + (i*0x800)) & 0xf0f0f0f0);
2145
2146 for (i = 0; i < 2; i++)
2147 bios_wr32(bios, 0x614300 + (i*0x800), bios_rd32(
2148 bios, 0x614300 + (i*0x800)) & 0xfffff0f0);
2149
2150 for (i = 0; i < 3; i++)
2151 bios_wr32(bios, 0x614380 + (i*0x800), bios_rd32(
2152 bios, 0x614380 + (i*0x800)) & 0xfffff0f0);
2153
2154 for (i = 0; i < 2; i++)
2155 bios_wr32(bios, 0x614200 + (i*0x800), bios_rd32(
2156 bios, 0x614200 + (i*0x800)) & 0xfffffff0);
2157
2158 for (i = 0; i < 2; i++)
2159 bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32(
2160 bios, 0x614108 + (i*0x800)) & 0x0fffffff);
2161 return true;
2162 }
2163
2164 bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) |
2165 data);
2166 return true;
2167}
2168
2169static bool
2170init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2171{
2172 /*
2173 * INIT_SUB opcode: 0x6B ('k')
2174 *
2175 * offset (8 bit): opcode
2176 * offset + 1 (8 bit): script number
2177 *
2178 * Execute script number "script number", as a subroutine
2179 */
2180
2181 uint8_t sub = bios->data[offset + 1];
2182
2183 if (!iexec->execute)
2184 return true;
2185
2186 BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub);
2187
2188 parse_init_table(bios,
2189 ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]),
2190 iexec);
2191
2192 BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub);
2193
2194 return true;
2195}
2196
2197static bool
2198init_ram_condition(struct nvbios *bios, uint16_t offset,
2199 struct init_exec *iexec)
2200{
2201 /*
2202 * INIT_RAM_CONDITION opcode: 0x6D ('m')
2203 *
2204 * offset (8 bit): opcode
2205 * offset + 1 (8 bit): mask
2206 * offset + 2 (8 bit): cmpval
2207 *
2208 * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
2209 * If condition not met skip subsequent opcodes until condition is
2210 * inverted (INIT_NOT), or we hit INIT_RESUME
2211 */
2212
2213 uint8_t mask = bios->data[offset + 1];
2214 uint8_t cmpval = bios->data[offset + 2];
2215 uint8_t data;
2216
2217 if (!iexec->execute)
2218 return true;
2219
2220 data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
2221
2222 BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
2223 offset, data, cmpval);
2224
2225 if (data == cmpval)
2226 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2227 else {
2228 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2229 iexec->execute = false;
2230 }
2231
2232 return true;
2233}
2234
2235static bool
2236init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2237{
2238 /*
2239 * INIT_NV_REG opcode: 0x6E ('n')
2240 *
2241 * offset (8 bit): opcode
2242 * offset + 1 (32 bit): register
2243 * offset + 5 (32 bit): mask
2244 * offset + 9 (32 bit): data
2245 *
2246 * Assign ((REGVAL("register") & "mask") | "data") to "register"
2247 */
2248
2249 uint32_t reg = ROM32(bios->data[offset + 1]);
2250 uint32_t mask = ROM32(bios->data[offset + 5]);
2251 uint32_t data = ROM32(bios->data[offset + 9]);
2252
2253 if (!iexec->execute)
2254 return true;
2255
2256 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n",
2257 offset, reg, mask, data);
2258
2259 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data);
2260
2261 return true;
2262}
2263
2264static bool
2265init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2266{
2267 /*
2268 * INIT_MACRO opcode: 0x6F ('o')
2269 *
2270 * offset (8 bit): opcode
2271 * offset + 1 (8 bit): macro number
2272 *
2273 * Look up macro index "macro number" in the macro index table.
2274 * The macro index table entry has 1 byte for the index in the macro
2275 * table, and 1 byte for the number of times to repeat the macro.
2276 * The macro table entry has 4 bytes for the register address and
2277 * 4 bytes for the value to write to that register
2278 */
2279
2280 uint8_t macro_index_tbl_idx = bios->data[offset + 1];
2281 uint16_t tmp = bios->macro_index_tbl_ptr + (macro_index_tbl_idx * MACRO_INDEX_SIZE);
2282 uint8_t macro_tbl_idx = bios->data[tmp];
2283 uint8_t count = bios->data[tmp + 1];
2284 uint32_t reg, data;
2285 int i;
2286
2287 if (!iexec->execute)
2288 return true;
2289
2290 BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, "
2291 "Count: 0x%02X\n",
2292 offset, macro_index_tbl_idx, macro_tbl_idx, count);
2293
2294 for (i = 0; i < count; i++) {
2295 uint16_t macroentryptr = bios->macro_tbl_ptr + (macro_tbl_idx + i) * MACRO_SIZE;
2296
2297 reg = ROM32(bios->data[macroentryptr]);
2298 data = ROM32(bios->data[macroentryptr + 4]);
2299
2300 bios_wr32(bios, reg, data);
2301 }
2302
2303 return true;
2304}
2305
2306static bool
2307init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2308{
2309 /*
2310 * INIT_DONE opcode: 0x71 ('q')
2311 *
2312 * offset (8 bit): opcode
2313 *
2314 * End the current script
2315 */
2316
2317 /* mild retval abuse to stop parsing this table */
2318 return false;
2319}
2320
2321static bool
2322init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2323{
2324 /*
2325 * INIT_RESUME opcode: 0x72 ('r')
2326 *
2327 * offset (8 bit): opcode
2328 *
2329 * End the current execute / no-execute condition
2330 */
2331
2332 if (iexec->execute)
2333 return true;
2334
2335 iexec->execute = true;
2336 BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset);
2337
2338 return true;
2339}
2340
2341static bool
2342init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2343{
2344 /*
2345 * INIT_TIME opcode: 0x74 ('t')
2346 *
2347 * offset (8 bit): opcode
2348 * offset + 1 (16 bit): time
2349 *
2350 * Sleep for "time" microseconds.
2351 */
2352
2353 unsigned time = ROM16(bios->data[offset + 1]);
2354
2355 if (!iexec->execute)
2356 return true;
2357
2358 BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n",
2359 offset, time);
2360
2361 if (time < 1000)
2362 udelay(time);
2363 else
2364 msleep((time + 900) / 1000);
2365
2366 return true;
2367}
2368
2369static bool
2370init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2371{
2372 /*
2373 * INIT_CONDITION opcode: 0x75 ('u')
2374 *
2375 * offset (8 bit): opcode
2376 * offset + 1 (8 bit): condition number
2377 *
2378 * Check condition "condition number" in the condition table.
2379 * If condition not met skip subsequent opcodes until condition is
2380 * inverted (INIT_NOT), or we hit INIT_RESUME
2381 */
2382
2383 uint8_t cond = bios->data[offset + 1];
2384
2385 if (!iexec->execute)
2386 return true;
2387
2388 BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond);
2389
2390 if (bios_condition_met(bios, offset, cond))
2391 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2392 else {
2393 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2394 iexec->execute = false;
2395 }
2396
2397 return true;
2398}
2399
2400static bool
2401init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2402{
2403 /*
2404 * INIT_IO_CONDITION opcode: 0x76
2405 *
2406 * offset (8 bit): opcode
2407 * offset + 1 (8 bit): condition number
2408 *
2409 * Check condition "condition number" in the io condition table.
2410 * If condition not met skip subsequent opcodes until condition is
2411 * inverted (INIT_NOT), or we hit INIT_RESUME
2412 */
2413
2414 uint8_t cond = bios->data[offset + 1];
2415
2416 if (!iexec->execute)
2417 return true;
2418
2419 BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond);
2420
2421 if (io_condition_met(bios, offset, cond))
2422 BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset);
2423 else {
2424 BIOSLOG(bios, "0x%04X: Condition not fulfilled -- skipping following commands\n", offset);
2425 iexec->execute = false;
2426 }
2427
2428 return true;
2429}
2430
2431static bool
2432init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2433{
2434 /*
2435 * INIT_INDEX_IO opcode: 0x78 ('x')
2436 *
2437 * offset (8 bit): opcode
2438 * offset + 1 (16 bit): CRTC port
2439 * offset + 3 (8 bit): CRTC index
2440 * offset + 4 (8 bit): mask
2441 * offset + 5 (8 bit): data
2442 *
2443 * Read value at index "CRTC index" on "CRTC port", AND with "mask",
2444 * OR with "data", write-back
2445 */
2446
2447 uint16_t crtcport = ROM16(bios->data[offset + 1]);
2448 uint8_t crtcindex = bios->data[offset + 3];
2449 uint8_t mask = bios->data[offset + 4];
2450 uint8_t data = bios->data[offset + 5];
2451 uint8_t value;
2452
2453 if (!iexec->execute)
2454 return true;
2455
2456 BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, "
2457 "Data: 0x%02X\n",
2458 offset, crtcport, crtcindex, mask, data);
2459
2460 value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data;
2461 bios_idxprt_wr(bios, crtcport, crtcindex, value);
2462
2463 return true;
2464}
2465
2466static bool
2467init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2468{
2469 /*
2470 * INIT_PLL opcode: 0x79 ('y')
2471 *
2472 * offset (8 bit): opcode
2473 * offset + 1 (32 bit): register
2474 * offset + 5 (16 bit): freq
2475 *
2476 * Set PLL register "register" to coefficients for frequency (10kHz)
2477 * "freq"
2478 */
2479
2480 uint32_t reg = ROM32(bios->data[offset + 1]);
2481 uint16_t freq = ROM16(bios->data[offset + 5]);
2482
2483 if (!iexec->execute)
2484 return true;
2485
2486 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq);
2487
2488 setPLL(bios, reg, freq * 10);
2489
2490 return true;
2491}
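
/*
 * Illustrative sketch, not part of the driver: INIT_PLL stores its
 * frequency in 10kHz units while setPLL() takes kHz, hence the
 * "freq * 10" above.  A raw value of 10000 thus requests 100000kHz,
 * i.e. 100MHz.
 */
static inline uint32_t example_init_pll_khz(uint16_t raw_10khz)
{
	return (uint32_t)raw_10khz * 10;	/* 10kHz units -> kHz */
}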
2492
2493static bool
2494init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2495{
2496 /*
2497 * INIT_ZM_REG opcode: 0x7A ('z')
2498 *
2499 * offset (8 bit): opcode
2500 * offset + 1 (32 bit): register
2501 * offset + 5 (32 bit): value
2502 *
2503 * Assign "value" to "register"
2504 */
2505
2506 uint32_t reg = ROM32(bios->data[offset + 1]);
2507 uint32_t value = ROM32(bios->data[offset + 5]);
2508
2509 if (!iexec->execute)
2510 return true;
2511
2512 if (reg == 0x000200)
2513 value |= 1;
2514
2515 bios_wr32(bios, reg, value);
2516
2517 return true;
2518}
2519
2520static bool
2521init_ram_restrict_pll(struct nvbios *bios, uint16_t offset,
2522 struct init_exec *iexec)
2523{
2524 /*
2525 * INIT_RAM_RESTRICT_PLL opcode: 0x87 ('')
2526 *
2527 * offset (8 bit): opcode
2528 * offset + 1 (8 bit): PLL type
2529 * offset + 2 (32 bit): frequency 0
2530 *
2531 * Uses the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
2532 * ram_restrict_table_ptr. The value read from there is used to select
2533 * a frequency from the table starting at 'frequency 0' to be
2534 * programmed into the PLL corresponding to 'type'.
2535 *
2536 * The PLL limits table on cards using this opcode has a mapping of
2537 * 'type' to the relevant registers.
2538 */
2539
2540 struct drm_device *dev = bios->dev;
2541 uint32_t strap = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) & 0x0000003c) >> 2;
2542 uint8_t index = bios->data[bios->ram_restrict_tbl_ptr + strap];
2543 uint8_t type = bios->data[offset + 1];
2544 uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]);
2545 uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry;
2546 int i;
2547
2548 if (!iexec->execute)
2549 return true;
2550
2551 if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) {
2552 NV_ERROR(dev, "PLL limits table not version 3.x\n");
2553 return true; /* deliberate, allow default clocks to remain */
2554 }
2555
2556 entry = pll_limits + pll_limits[1];
2557 for (i = 0; i < pll_limits[3]; i++, entry += pll_limits[2]) {
2558 if (entry[0] == type) {
2559 uint32_t reg = ROM32(entry[3]);
2560
2561 BIOSLOG(bios, "0x%04X: "
2562 "Type %02x Reg 0x%08x Freq %dKHz\n",
2563 offset, type, reg, freq);
2564
2565 setPLL(bios, reg, freq);
2566 return true;
2567 }
2568 }
2569
2570 NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type);
2571 return true;
2572}
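
/*
 * Illustrative sketch, not used by the driver: the RAM_RESTRICT opcodes
 * derive their table index from the 4-bit RAMCFG strap in bits [5:2] of
 * PEXTDEV_BOOT_0, extracted inline above.
 */
static inline uint8_t example_ramcfg_strap(uint32_t pextdev_boot_0)
{
	return (pextdev_boot_0 & 0x0000003c) >> 2;	/* RAMCFG strap */
}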
2573
2574static bool
2575init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2576{
2577 /*
2578 * INIT_8C opcode: 0x8C ('')
2579 *
2580 * NOP so far.
2581 *
2582 */
2583
2584 return true;
2585}
2586
2587static bool
2588init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2589{
2590 /*
2591 * INIT_8D opcode: 0x8D ('')
2592 *
2593 * NOP so far.
2594 *
2595 */
2596
2597 return true;
2598}
2599
2600static bool
2601init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2602{
2603 /*
2604 * INIT_GPIO opcode: 0x8E ('')
2605 *
2606 * offset (8 bit): opcode
2607 *
2608 * Loop over all entries in the DCB GPIO table, and initialise
2609 * each GPIO according to various values listed in each entry
2610 */
2611
2612 const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
2613 const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
2614 const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
2615 const uint8_t *gpio_entry;
2616 int i;
2617
2618 if (bios->bdcb.version != 0x40) {
2619 NV_ERROR(bios->dev, "DCB table not version 4.0\n");
2620 return false;
2621 }
2622
2623 if (!bios->bdcb.gpio_table_ptr) {
2624 NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
2625 return false;
2626 }
2627
2628 gpio_entry = gpio_table + gpio_table[1];
2629 for (i = 0; i < gpio_table[2]; i++, gpio_entry += gpio_table[3]) {
2630 uint32_t entry = ROM32(gpio_entry[0]), r, s, v;
2631 int line = (entry & 0x0000001f);
2632
2633 BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, entry);
2634
2635 if ((entry & 0x0000ff00) == 0x0000ff00)
2636 continue;
2637
2638 r = nv50_gpio_reg[line >> 3];
2639 s = (line & 0x07) << 2;
2640 v = bios_rd32(bios, r) & ~(0x00000003 << s);
2641 if (entry & 0x01000000)
2642 v |= (((entry & 0x60000000) >> 29) ^ 2) << s;
2643 else
2644 v |= (((entry & 0x18000000) >> 27) ^ 2) << s;
2645 bios_wr32(bios, r, v);
2646
2647 r = nv50_gpio_ctl[line >> 4];
2648 s = (line & 0x0f);
2649 v = bios_rd32(bios, r) & ~(0x00010001 << s);
2650 switch ((entry & 0x06000000) >> 25) {
2651 case 1:
2652 v |= (0x00000001 << s);
2653 break;
2654 case 2:
2655 v |= (0x00010000 << s);
2656 break;
2657 default:
2658 break;
2659 }
2660 bios_wr32(bios, r, v);
2661 }
2662
2663 return true;
2664}
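
/*
 * Illustrative sketch, not part of the driver: each register in
 * nv50_gpio_reg[] above packs eight GPIO lines at four bits apiece, so
 * a line's register index is (line >> 3) and its bit shift is
 * (line & 7) * 4 -- e.g. line 11 lands in nv50_gpio_reg[1] (0xe108) at
 * shift 12.
 */
static inline unsigned int example_gpio_shift(unsigned int line)
{
	return (line & 0x07) << 2;	/* four bits per line */
}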
2665
2666/* hack to avoid moving the itbl_entry array before this function */
2667int init_ram_restrict_zm_reg_group_blocklen;
2668
2669static bool
2670init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
2671 struct init_exec *iexec)
2672{
2673 /*
2674 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode: 0x8F ('')
2675 *
2676 * offset (8 bit): opcode
2677 * offset + 1 (32 bit): reg
2678 * offset + 5 (8 bit): regincrement
2679 * offset + 6 (8 bit): count
2680 * offset + 7 (32 bit): value 1,1
2681 * ...
2682 *
2683 * Use the RAMCFG strap of PEXTDEV_BOOT as an index into the table at
2684 * ram_restrict_table_ptr. The value read from here is 'n', and
2685 * "value 1,n" gets written to "reg". This repeats "count" times and on
2686 * each iteration 'm', "reg" increases by "regincrement" and
2687 * "value m,n" is used. The extent of n is limited by a number read
2688 * from the 'M' BIT table, herein called "blocklen"
2689 */
2690
2691 uint32_t reg = ROM32(bios->data[offset + 1]);
2692 uint8_t regincrement = bios->data[offset + 5];
2693 uint8_t count = bios->data[offset + 6];
2694 uint32_t strap_ramcfg, data;
2695 uint16_t blocklen;
2696 uint8_t index;
2697 int i;
2698
2699 /* previously set by 'M' BIT table */
2700 blocklen = init_ram_restrict_zm_reg_group_blocklen;
2701
2702 if (!iexec->execute)
2703 return true;
2704
2705 if (!blocklen) {
2706 NV_ERROR(bios->dev,
2707 "0x%04X: Zero block length - has the M table "
2708 "been parsed?\n", offset);
2709 return false;
2710 }
2711
2712 strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
2713 index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
2714
2715 BIOSLOG(bios, "0x%04X: Reg: 0x%08X, RegIncrement: 0x%02X, "
2716 "Count: 0x%02X, StrapRamCfg: 0x%02X, Index: 0x%02X\n",
2717 offset, reg, regincrement, count, strap_ramcfg, index);
2718
2719 for (i = 0; i < count; i++) {
2720 data = ROM32(bios->data[offset + 7 + index * 4 + blocklen * i]);
2721
2722 bios_wr32(bios, reg, data);
2723
2724 reg += regincrement;
2725 }
2726
2727 return true;
2728}
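
/*
 * Illustrative sketch, not used by the driver: the value written on
 * iteration 'i' above comes from a table of "count" rows of "blocklen"
 * bytes each, with the RAMCFG-derived 'index' selecting one 32-bit
 * column -- i.e. byte (7 + index * 4 + blocklen * i) past the opcode.
 */
static inline unsigned int
example_zm_reg_group_data_offset(uint8_t index, uint16_t blocklen, int i)
{
	return 7 + index * 4 + blocklen * i;
}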
2729
2730static bool
2731init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2732{
2733 /*
2734 * INIT_COPY_ZM_REG opcode: 0x90 ('')
2735 *
2736 * offset (8 bit): opcode
2737 * offset + 1 (32 bit): src reg
2738 * offset + 5 (32 bit): dst reg
2739 *
2740 * Put contents of "src reg" into "dst reg"
2741 */
2742
2743 uint32_t srcreg = ROM32(bios->data[offset + 1]);
2744 uint32_t dstreg = ROM32(bios->data[offset + 5]);
2745
2746 if (!iexec->execute)
2747 return true;
2748
2749 bios_wr32(bios, dstreg, bios_rd32(bios, srcreg));
2750
2751 return true;
2752}
2753
2754static bool
2755init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset,
2756 struct init_exec *iexec)
2757{
2758 /*
2759 * INIT_ZM_REG_GROUP_ADDRESS_LATCHED opcode: 0x91 ('')
2760 *
2761 * offset (8 bit): opcode
2762 * offset + 1 (32 bit): dst reg
2763 * offset + 5 (8 bit): count
2764 * offset + 6 (32 bit): data 1
2765 * ...
2766 *
2767 * For each of "count" values write "data n" to "dst reg"
2768 */
2769
2770 uint32_t reg = ROM32(bios->data[offset + 1]);
2771 uint8_t count = bios->data[offset + 5];
2772 int i;
2773
2774 if (!iexec->execute)
2775 return true;
2776
2777 for (i = 0; i < count; i++) {
2778 uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]);
2779 bios_wr32(bios, reg, data);
2780 }
2781
2782 return true;
2783}
2784
2785static bool
2786init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2787{
2788 /*
2789 * INIT_RESERVED opcode: 0x92 ('')
2790 *
2791 * offset (8 bit): opcode
2792 *
2793 * Seemingly does nothing
2794 */
2795
2796 return true;
2797}
2798
2799static bool
2800init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2801{
2802 /*
2803 * INIT_96 opcode: 0x96 ('')
2804 *
2805 * offset (8 bit): opcode
2806 * offset + 1 (32 bit): sreg
2807 * offset + 5 (8 bit): sshift
2808 * offset + 6 (8 bit): smask
2809 * offset + 7 (8 bit): index
2810 * offset + 8 (32 bit): reg
2811 * offset + 12 (32 bit): mask
2812 * offset + 16 (8 bit): shift
2813 *
2814 */
2815
2816 uint16_t xlatptr = bios->init96_tbl_ptr + (bios->data[offset + 7] * 2);
2817 uint32_t reg = ROM32(bios->data[offset + 8]);
2818 uint32_t mask = ROM32(bios->data[offset + 12]);
2819 uint32_t val;
2820
2821 val = bios_rd32(bios, ROM32(bios->data[offset + 1]));
2822 if (bios->data[offset + 5] < 0x80)
2823 val >>= bios->data[offset + 5];
2824 else
2825 val <<= (0x100 - bios->data[offset + 5]);
2826 val &= bios->data[offset + 6];
2827
2828 val = bios->data[ROM16(bios->data[xlatptr]) + val];
2829 val <<= bios->data[offset + 16];
2830
2831 if (!iexec->execute)
2832 return true;
2833
2834 bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val);
2835 return true;
2836}
2837
2838static bool
2839init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2840{
2841 /*
2842 * INIT_97 opcode: 0x97 ('')
2843 *
2844 * offset (8 bit): opcode
2845 * offset + 1 (32 bit): register
2846 * offset + 5 (32 bit): mask
2847 * offset + 9 (32 bit): value
2848 *
2849 * Adds "value" to "register" preserving the fields specified
2850 * by "mask"
2851 */
2852
2853 uint32_t reg = ROM32(bios->data[offset + 1]);
2854 uint32_t mask = ROM32(bios->data[offset + 5]);
2855 uint32_t add = ROM32(bios->data[offset + 9]);
2856 uint32_t val;
2857
2858 val = bios_rd32(bios, reg);
2859 val = (val & mask) | ((val + add) & ~mask);
2860
2861 if (!iexec->execute)
2862 return true;
2863
2864 bios_wr32(bios, reg, val);
2865 return true;
2866}
2867
2868static bool
2869init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2870{
2871 /*
2872 * INIT_AUXCH opcode: 0x98 ('')
2873 *
2874 * offset (8 bit): opcode
2875 * offset + 1 (32 bit): address
2876 * offset + 5 (8 bit): count
2877 * offset + 6 (8 bit): mask 0
2878 * offset + 7 (8 bit): data 0
2879 * ...
2880 *
2881 */
2882
2883 struct drm_device *dev = bios->dev;
2884 struct nouveau_i2c_chan *auxch;
2885 uint32_t addr = ROM32(bios->data[offset + 1]);
2886 uint8_t len = bios->data[offset + 5];
2887 int ret, i;
2888
2889 if (!bios->display.output) {
2890 NV_ERROR(dev, "INIT_AUXCH: no active output\n");
2891 return false;
2892 }
2893
2894 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
2895 if (!auxch) {
2896 NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
2897 bios->display.output->i2c_index);
2898 return false;
2899 }
2900
2901 if (!iexec->execute)
2902 return true;
2903
2904 offset += 6;
2905 for (i = 0; i < len; i++, offset += 2) {
2906 uint8_t data;
2907
2908 ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
2909 if (ret) {
2910 NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
2911 return false;
2912 }
2913
2914 data &= bios->data[offset + 0];
2915 data |= bios->data[offset + 1];
2916
2917 ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
2918 if (ret) {
2919 NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
2920 return false;
2921 }
2922 }
2923
2924 return true;
2925}
2926
2927static bool
2928init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
2929{
2930 /*
2931 * INIT_ZM_AUXCH opcode: 0x99 ('')
2932 *
2933 * offset (8 bit): opcode
2934 * offset + 1 (32 bit): address
2935 * offset + 5 (8 bit): count
2936 * offset + 6 (8 bit): data 0
2937 * ...
2938 *
2939 */
2940
2941 struct drm_device *dev = bios->dev;
2942 struct nouveau_i2c_chan *auxch;
2943 uint32_t addr = ROM32(bios->data[offset + 1]);
2944 uint8_t len = bios->data[offset + 5];
2945 int ret, i;
2946
2947 if (!bios->display.output) {
2948 NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
2949 return false;
2950 }
2951
2952 auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
2953 if (!auxch) {
2954 NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
2955 bios->display.output->i2c_index);
2956 return false;
2957 }
2958
2959 if (!iexec->execute)
2960 return true;
2961
2962 offset += 6;
2963 for (i = 0; i < len; i++, offset++) {
2964 ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
2965 if (ret) {
2966 NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
2967 return false;
2968 }
2969 }
2970
2971 return true;
2972}
2973
2974static struct init_tbl_entry itbl_entry[] = {
2975 /* command name , id , length , offset , mult , command handler */
2976 /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
2977 { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog },
2978 { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat },
2979 { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll },
2980 { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat },
2981 { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy },
2982 { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not },
2983 { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition },
2984 { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched },
2985 { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 },
2986 { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 },
2987 { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte },
2988 { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte },
2989 { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c },
2990 { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds },
2991 { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group },
2992 { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch },
2993 { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr },
2994 { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr },
2995 { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group },
2996 { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time },
2997 { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence },
2998 /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
2999 { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct },
3000 { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg },
3001 { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io },
3002 { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem },
3003 { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset },
3004 { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem },
3005 { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk },
3006 { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit },
3007 { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io },
3008 { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub },
3009 { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition },
3010 { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg },
3011 { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro },
3012 { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done },
3013 { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume },
3014 /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */
3015 { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time },
3016 { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition },
3017 { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition },
3018 { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io },
3019 { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll },
3020 { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg },
3021 /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */
3022 { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll },
3023 { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c },
3024 { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d },
3025 { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio },
3026 /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */
3027 { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group },
3028 { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg },
3029 { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched },
3030 { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved },
3031 { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 },
3032 { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 },
3033 { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch },
3034 { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch },
3035 { NULL , 0 , 0 , 0 , 0 , NULL }
3036};
3037
3038static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i)
3039{
3040 /* Calculates the length of a given init table entry. */
3041 return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier;
3042}
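
/*
 * Worked example, using values from itbl_entry[] above:
 * INIT_ZM_REG_SEQUENCE (0x58) has a base length of 6, its count byte at
 * offset + 5 and a multiplier of 4, so an instance with a count of 3
 * occupies 6 + 3 * 4 = 18 bytes.  Fixed-length opcodes simply use a
 * multiplier of 0.
 */
static inline unsigned int example_zm_reg_sequence_len(uint8_t count)
{
	return 6 + count * 4;	/* mirrors get_init_table_entry_length() */
}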
3043
3044#define MAX_TABLE_OPS 1000
3045
3046static int
3047parse_init_table(struct nvbios *bios, unsigned int offset,
3048 struct init_exec *iexec)
3049{
3050 /*
3051 * Parses all commands in an init table.
3052 *
3053 * We start out executing all commands found in the init table. Some
3054 * opcodes may change the status of iexec->execute to SKIP, which will
3055 * cause the following opcodes to perform no operation until the value
3056 * is changed back to EXECUTE.
3057 */
3058
3059 int count = 0, i;
3060 uint8_t id;
3061
3062 /*
3063 * Loop until INIT_DONE causes us to break out of the loop
3064 * (or until offset > bios length, just in case...)
3065 * (and no more than MAX_TABLE_OPS iterations, just in case...)
3066 */
3067 while ((offset < bios->length) && (count++ < MAX_TABLE_OPS)) {
3068 id = bios->data[offset];
3069
3070 /* Find matching id in itbl_entry */
3071 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != id); i++)
3072 ;
3073
3074 if (itbl_entry[i].name) {
3075 BIOSLOG(bios, "0x%04X: [ (0x%02X) - %s ]\n",
3076 offset, itbl_entry[i].id, itbl_entry[i].name);
3077
3078 /* execute eventual command handler */
3079 if (itbl_entry[i].handler)
3080 if (!(*itbl_entry[i].handler)(bios, offset, iexec))
3081 break;
3082 } else {
3083 NV_ERROR(bios->dev,
3084 "0x%04X: Init table command not found: "
3085 "0x%02X\n", offset, id);
3086 return -ENOENT;
3087 }
3088
3089 /*
3090 * Add the offset of the current command including all data
3091 * of that command. The offset will then be pointing on the
3092 * next op code.
3093 */
3094 offset += get_init_table_entry_length(bios, offset, i);
3095 }
3096
3097 if (offset >= bios->length)
3098 NV_WARN(bios->dev,
3099 "Offset 0x%04X greater than known bios image length. "
3100 "Corrupt image?\n", offset);
3101 if (count >= MAX_TABLE_OPS)
3102 NV_WARN(bios->dev,
3103 "More than %d opcodes to a table is unlikely, "
3104 "is the bios image corrupt?\n", MAX_TABLE_OPS);
3105
3106 return 0;
3107}
3108
3109static void
3110parse_init_tables(struct nvbios *bios)
3111{
3112 /* Loops and calls parse_init_table() for each present table. */
3113
3114 int i = 0;
3115 uint16_t table;
3116 struct init_exec iexec = {true, false};
3117
3118 if (bios->old_style_init) {
3119 if (bios->init_script_tbls_ptr)
3120 parse_init_table(bios, bios->init_script_tbls_ptr, &iexec);
3121 if (bios->extra_init_script_tbl_ptr)
3122 parse_init_table(bios, bios->extra_init_script_tbl_ptr, &iexec);
3123
3124 return;
3125 }
3126
3127 while ((table = ROM16(bios->data[bios->init_script_tbls_ptr + i]))) {
3128 NV_INFO(bios->dev,
3129 "Parsing VBIOS init table %d at offset 0x%04X\n",
3130 i / 2, table);
3131 BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", table);
3132
3133 parse_init_table(bios, table, &iexec);
3134 i += 2;
3135 }
3136}
3137
3138static uint16_t clkcmptable(struct nvbios *bios, uint16_t clktable, int pxclk)
3139{
3140 int compare_record_len, i = 0;
3141 uint16_t compareclk, scriptptr = 0;
3142
3143 if (bios->major_version < 5) /* pre BIT */
3144 compare_record_len = 3;
3145 else
3146 compare_record_len = 4;
3147
3148 do {
3149 compareclk = ROM16(bios->data[clktable + compare_record_len * i]);
3150 if (pxclk >= compareclk * 10) {
3151 if (bios->major_version < 5) {
3152 uint8_t tmdssub = bios->data[clktable + 2 + compare_record_len * i];
3153 scriptptr = ROM16(bios->data[bios->init_script_tbls_ptr + tmdssub * 2]);
3154 } else
3155 scriptptr = ROM16(bios->data[clktable + 2 + compare_record_len * i]);
3156 break;
3157 }
3158 i++;
3159 } while (compareclk);
3160
3161 return scriptptr;
3162}
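
/*
 * Illustrative sketch, not part of the driver: the comparison clocks in
 * these tables are stored in 10kHz units while pxclk is in kHz, hence
 * "compareclk * 10" above -- a table value of 15000 matches any pixel
 * clock of at least 150000kHz (150MHz).
 */
static inline bool example_clkcmp_match(int pxclk_khz, uint16_t compareclk)
{
	return pxclk_khz >= compareclk * 10;
}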
3163
3164static void
3165run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
3166 struct dcb_entry *dcbent, int head, bool dl)
3167{
3168 struct drm_nouveau_private *dev_priv = dev->dev_private;
3169 struct nvbios *bios = &dev_priv->VBIOS;
3170 struct init_exec iexec = {true, false};
3171
3172 NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
3173 scriptptr);
3174 bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_44,
3175 head ? NV_CIO_CRE_44_HEADB : NV_CIO_CRE_44_HEADA);
3176 /* note: if dcb entries have been merged, index may be misleading */
3177 NVWriteVgaCrtc5758(dev, head, 0, dcbent->index);
3178 parse_init_table(bios, scriptptr, &iexec);
3179
3180 nv04_dfp_bind_head(dev, dcbent, head, dl);
3181}
3182
3183static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
3184{
3185 struct drm_nouveau_private *dev_priv = dev->dev_private;
3186 struct nvbios *bios = &dev_priv->VBIOS;
3187 uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
3188 uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
3189
3190 if (!bios->fp.xlated_entry || !sub || !scriptofs)
3191 return -EINVAL;
3192
3193 run_digital_op_script(dev, scriptofs, dcbent, head, bios->fp.dual_link);
3194
3195 if (script == LVDS_PANEL_OFF) {
3196 /* off-on delay in ms */
3197 msleep(ROM16(bios->data[bios->fp.xlated_entry + 7]));
3198 }
3199#ifdef __powerpc__
3200 /* Powerbook specific quirks */
3201 if (script == LVDS_RESET && ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0329))
3202 nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72);
3203 if ((dev->pci_device & 0xffff) == 0x0179 || (dev->pci_device & 0xffff) == 0x0189 || (dev->pci_device & 0xffff) == 0x0329) {
3204 if (script == LVDS_PANEL_ON) {
3205 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) | (1 << 31));
3206 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) | 1);
3207 }
3208 if (script == LVDS_PANEL_OFF) {
3209 bios_wr32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL, bios_rd32(bios, NV_PBUS_DEBUG_DUALHEAD_CTL) & ~(1 << 31));
3210 bios_wr32(bios, NV_PCRTC_GPIO_EXT, bios_rd32(bios, NV_PCRTC_GPIO_EXT) & ~3);
3211 }
3212 }
3213#endif
3214
3215 return 0;
3216}
3217
3218static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
3219{
3220 /*
3221 * The BIT LVDS table's header has the information to setup the
3222 * necessary registers. Following the standard 4 byte header are:
3223 * A bitmask byte and a dual-link transition pxclk value for use in
3224 * selecting the init script when not using straps; 4 script pointers
3225 * for panel power, selected by output and on/off; and 8 table pointers
3226 * for panel init, the needed one determined by output, and bits in the
3227 * conf byte. These tables are similar to the TMDS tables, consisting
3228 * of a list of pxclks and script pointers.
3229 */
3230 struct drm_nouveau_private *dev_priv = dev->dev_private;
3231 struct nvbios *bios = &dev_priv->VBIOS;
3232 unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
3233 uint16_t scriptptr = 0, clktable;
3234 uint8_t clktableptr = 0;
3235
3236 /*
3237 * For now we assume version 3.0 table - g80 support will need some
3238 * changes
3239 */
3240
3241 switch (script) {
3242 case LVDS_INIT:
3243 return -ENOSYS;
3244 case LVDS_BACKLIGHT_ON:
3245 case LVDS_PANEL_ON:
3246 scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 7 + outputset * 2]);
3247 break;
3248 case LVDS_BACKLIGHT_OFF:
3249 case LVDS_PANEL_OFF:
3250 scriptptr = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 11 + outputset * 2]);
3251 break;
3252 case LVDS_RESET:
3253 if (dcbent->lvdsconf.use_straps_for_mode) {
3254 if (bios->fp.dual_link)
3255 clktableptr += 2;
3256 if (bios->fp.BITbit1)
3257 clktableptr++;
3258 } else {
3259 /* using EDID */
3260 uint8_t fallback = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
3261 int fallbackcmpval = (dcbent->or == 4) ? 4 : 1;
3262
3263 if (bios->fp.dual_link) {
3264 clktableptr += 2;
3265 fallbackcmpval *= 2;
3266 }
3267 if (fallbackcmpval & fallback)
3268 clktableptr++;
3269 }
3270
3271 /* adding outputset * 8 may not be correct */
3272 clktable = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 15 + clktableptr * 2 + outputset * 8]);
3273 if (!clktable) {
3274 NV_ERROR(dev, "Pixel clock comparison table not found\n");
3275 return -ENOENT;
3276 }
3277 scriptptr = clkcmptable(bios, clktable, pxclk);
3278 }
3279
3280 if (!scriptptr) {
3281 NV_ERROR(dev, "LVDS output init script not found\n");
3282 return -ENOENT;
3283 }
3284 run_digital_op_script(dev, scriptptr, dcbent, head, bios->fp.dual_link);
3285
3286 return 0;
3287}
3288
3289int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script, int pxclk)
3290{
3291 /*
3292 * LVDS operations are multiplexed in an effort to present a single API
3293 * which works with two vastly differing underlying structures.
3294 * This acts as the demux
3295 */
3296
3297 struct drm_nouveau_private *dev_priv = dev->dev_private;
3298 struct nvbios *bios = &dev_priv->VBIOS;
3299 uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3300 uint32_t sel_clk_binding, sel_clk;
3301 int ret;
3302
3303 if (bios->fp.last_script_invoc == (script << 1 | head) || !lvds_ver ||
3304 (lvds_ver >= 0x30 && script == LVDS_INIT))
3305 return 0;
3306
3307 if (!bios->fp.lvds_init_run) {
3308 bios->fp.lvds_init_run = true;
3309 call_lvds_script(dev, dcbent, head, LVDS_INIT, pxclk);
3310 }
3311
3312 if (script == LVDS_PANEL_ON && bios->fp.reset_after_pclk_change)
3313 call_lvds_script(dev, dcbent, head, LVDS_RESET, pxclk);
3314 if (script == LVDS_RESET && bios->fp.power_off_for_reset)
3315 call_lvds_script(dev, dcbent, head, LVDS_PANEL_OFF, pxclk);
3316
3317 NV_TRACE(dev, "Calling LVDS script %d:\n", script);
3318
3319 /* don't let script change pll->head binding */
3320 sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
3321
3322 if (lvds_ver < 0x30)
3323 ret = call_lvds_manufacturer_script(dev, dcbent, head, script);
3324 else
3325 ret = run_lvds_table(dev, dcbent, head, script, pxclk);
3326
3327 bios->fp.last_script_invoc = (script << 1 | head);
3328
3329 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
3330 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
3331 /* some scripts set a value in NV_PBUS_POWERCTRL_2 and break video overlay */
3332 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
3333
3334 return ret;
3335}
3336
3337struct lvdstableheader {
3338 uint8_t lvds_ver, headerlen, recordlen;
3339};
3340
3341static int parse_lvds_manufacturer_table_header(struct drm_device *dev, struct nvbios *bios, struct lvdstableheader *lth)
3342{
3343 /*
3344 * BMP version (0xa) LVDS table has a simple header of version and
3345 * record length. The BIT LVDS table has the typical BIT table header:
3346 * version byte, header length byte, record length byte, and a byte for
3347 * the maximum number of records that can be held in the table.
3348 */
3349
3350 uint8_t lvds_ver, headerlen, recordlen;
3351
3352 memset(lth, 0, sizeof(struct lvdstableheader));
3353
3354 if (bios->fp.lvdsmanufacturerpointer == 0x0) {
3355 NV_ERROR(dev, "Pointer to LVDS manufacturer table invalid\n");
3356 return -EINVAL;
3357 }
3358
3359 lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
3360
3361 switch (lvds_ver) {
3362 case 0x0a: /* pre NV40 */
3363 headerlen = 2;
3364 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3365 break;
3366 case 0x30: /* NV4x */
3367 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3368 if (headerlen < 0x1f) {
3369 NV_ERROR(dev, "LVDS table header not understood\n");
3370 return -EINVAL;
3371 }
3372 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3373 break;
3374 case 0x40: /* G80/G90 */
3375 headerlen = bios->data[bios->fp.lvdsmanufacturerpointer + 1];
3376 if (headerlen < 0x7) {
3377 NV_ERROR(dev, "LVDS table header not understood\n");
3378 return -EINVAL;
3379 }
3380 recordlen = bios->data[bios->fp.lvdsmanufacturerpointer + 2];
3381 break;
3382 default:
3383 NV_ERROR(dev,
3384 "LVDS table revision %d.%d not currently supported\n",
3385 lvds_ver >> 4, lvds_ver & 0xf);
3386 return -ENOSYS;
3387 }
3388
3389 lth->lvds_ver = lvds_ver;
3390 lth->headerlen = headerlen;
3391 lth->recordlen = recordlen;
3392
3393 return 0;
3394}
3395
3396static int
3397get_fp_strap(struct drm_device *dev, struct nvbios *bios)
3398{
3399 struct drm_nouveau_private *dev_priv = dev->dev_private;
3400
3401 /*
3402 * The fp strap is normally dictated by the "User Strap" in
3403 * PEXTDEV_BOOT_0[20:16], but on BMP cards when bit 2 of the
3404 * Internal_Flags struct at 0x48 is set, the user strap gets overridden
3405 * by the PCI subsystem ID during POST, but not before the previous user
3406 * strap has been committed to CR58 for CR57=0xf on head A, which may be
3407 * read and used instead
3408 */
3409
3410 if (bios->major_version < 5 && bios->data[0x48] & 0x4)
3411 return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;
3412
3413 if (dev_priv->card_type >= NV_50)
3414 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
3415 else
3416 return (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
3417}
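
/*
 * Illustrative sketch, not used by the driver: the 4-bit strap returned
 * above sits at bits [19:16] of PEXTDEV_BOOT_0 on pre-NV50 cards and at
 * bits [27:24] from NV50 onwards.
 */
static inline uint8_t example_fp_strap(uint32_t boot0, bool nv50_or_later)
{
	return (boot0 >> (nv50_or_later ? 24 : 16)) & 0xf;
}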
3418
3419static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
3420{
3421 uint8_t *fptable;
3422 uint8_t fptable_ver, headerlen = 0, recordlen, fpentries = 0xf, fpindex;
3423 int ret, ofs, fpstrapping;
3424 struct lvdstableheader lth;
3425
3426 if (bios->fp.fptablepointer == 0x0) {
3427 /* Apple cards don't have the fp table; the laptops use DDC */
3428 /* The table is also missing on some x86 IGPs */
3429#ifndef __powerpc__
3430 NV_ERROR(dev, "Pointer to flat panel table invalid\n");
3431#endif
3432 bios->pub.digital_min_front_porch = 0x4b;
3433 return 0;
3434 }
3435
3436 fptable = &bios->data[bios->fp.fptablepointer];
3437 fptable_ver = fptable[0];
3438
3439 switch (fptable_ver) {
3440 /*
3441 * BMP version 0x5.0x11 BIOSen have version 1 like tables, but no
3442 * version field, and miss one of the spread spectrum/PWM bytes.
3443 * This could affect early GF2Go parts (not seen any appropriate ROMs
3444 * though). Here we assume that a version of 0x05 matches this case
3445 * (combining with a BMP version check would be better), as the
3446 * common case for the panel type field is 0x0005, and that is in
3447 * fact what we are reading the first byte of.
3448 */
3449 case 0x05: /* some NV10, 11, 15, 16 */
3450 recordlen = 42;
3451 ofs = -1;
3452 break;
3453 case 0x10: /* some NV15/16, and NV11+ */
3454 recordlen = 44;
3455 ofs = 0;
3456 break;
3457 case 0x20: /* NV40+ */
3458 headerlen = fptable[1];
3459 recordlen = fptable[2];
3460 fpentries = fptable[3];
3461 /*
3462 * fptable[4] is the minimum
3463 * RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
3464 */
3465 bios->pub.digital_min_front_porch = fptable[4];
3466 ofs = -7;
3467 break;
3468 default:
3469 NV_ERROR(dev,
3470 "FP table revision %d.%d not currently supported\n",
3471 fptable_ver >> 4, fptable_ver & 0xf);
3472 return -ENOSYS;
3473 }
3474
3475 if (!bios->is_mobile) /* !mobile only needs digital_min_front_porch */
3476 return 0;
3477
3478 ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
3479 if (ret)
3480 return ret;
3481
3482 if (lth.lvds_ver == 0x30 || lth.lvds_ver == 0x40) {
3483 bios->fp.fpxlatetableptr = bios->fp.lvdsmanufacturerpointer +
3484 lth.headerlen + 1;
3485 bios->fp.xlatwidth = lth.recordlen;
3486 }
3487 if (bios->fp.fpxlatetableptr == 0x0) {
3488 NV_ERROR(dev, "Pointer to flat panel xlat table invalid\n");
3489 return -EINVAL;
3490 }
3491
3492 fpstrapping = get_fp_strap(dev, bios);
3493
3494 fpindex = bios->data[bios->fp.fpxlatetableptr +
3495 fpstrapping * bios->fp.xlatwidth];
3496
3497 if (fpindex > fpentries) {
3498 NV_ERROR(dev, "Bad flat panel table index\n");
3499 return -ENOENT;
3500 }
3501
3502 /* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
3503 if (lth.lvds_ver > 0x10)
3504 bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
3505
3506 /*
3507 * If either the strap or the xlated fpindex value is 0xf, there is no
3508 * panel using a strap-derived bios mode present. This condition
3509 * includes, but is different from, the DDC panel indicator above
3510 */
3511 if (fpstrapping == 0xf || fpindex == 0xf)
3512 return 0;
3513
3514 bios->fp.mode_ptr = bios->fp.fptablepointer + headerlen +
3515 recordlen * fpindex + ofs;
3516
3517 NV_TRACE(dev, "BIOS FP mode: %dx%d (%dkHz pixel clock)\n",
3518 ROM16(bios->data[bios->fp.mode_ptr + 11]) + 1,
3519 ROM16(bios->data[bios->fp.mode_ptr + 25]) + 1,
3520 ROM16(bios->data[bios->fp.mode_ptr + 7]) * 10);
3521
3522 return 0;
3523}
3524
3525bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
3526{
3527 struct drm_nouveau_private *dev_priv = dev->dev_private;
3528 struct nvbios *bios = &dev_priv->VBIOS;
3529 uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
3530
3531 if (!mode) /* just checking whether we can produce a mode */
3532 return bios->fp.mode_ptr;
3533
3534 memset(mode, 0, sizeof(struct drm_display_mode));
3535 /*
3536 * For version 1.0 (version in byte 0):
3537 * bytes 1-2 are "panel type", including bits on whether Colour/mono,
3538 * single/dual link, and type (TFT etc.)
3539 * bytes 3-6 are bits per colour in RGBX
3540 */
3541 mode->clock = ROM16(mode_entry[7]) * 10;
3542 /* bytes 9-10 is HActive */
3543 mode->hdisplay = ROM16(mode_entry[11]) + 1;
3544 /*
3545 * bytes 13-14 is HValid Start
3546 * bytes 15-16 is HValid End
3547 */
3548 mode->hsync_start = ROM16(mode_entry[17]) + 1;
3549 mode->hsync_end = ROM16(mode_entry[19]) + 1;
3550 mode->htotal = ROM16(mode_entry[21]) + 1;
3551 /* bytes 23-24, 27-30 similarly, but vertical */
3552 mode->vdisplay = ROM16(mode_entry[25]) + 1;
3553 mode->vsync_start = ROM16(mode_entry[31]) + 1;
3554 mode->vsync_end = ROM16(mode_entry[33]) + 1;
3555 mode->vtotal = ROM16(mode_entry[35]) + 1;
3556 mode->flags |= (mode_entry[37] & 0x10) ?
3557 DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
3558 mode->flags |= (mode_entry[37] & 0x1) ?
3559 DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
3560 /*
3561 * bytes 38-39 relate to spread spectrum settings
3562 * bytes 40-43 are something to do with PWM
3563 */
3564
3565 mode->status = MODE_OK;
3566 mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
3567 drm_mode_set_name(mode);
3568 return bios->fp.mode_ptr;
3569}
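
/*
 * Illustrative sketch, not part of the driver: the mode timings decoded
 * above are stored minus one, so e.g. a raw HActive of 0x04ff yields
 * 1280 pixels; the pixel clock, as elsewhere, is in 10kHz units.
 */
static inline int example_fp_timing(uint16_t raw)
{
	return raw + 1;	/* timings are stored as (value - 1) */
}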
3570
3571int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, bool *if_is_24bit)
3572{
3573 /*
3574 * The LVDS table header is (mostly) described in
3575 * parse_lvds_manufacturer_table_header(): the BIT header additionally
3576 * contains the dual-link transition pxclk (in 10s kHz), at byte 5 - if
3577 * straps are not being used for the panel, this specifies the frequency
3578 * at which modes should be set up in the dual link style.
3579 *
3580 * Following the header, the BMP (ver 0xa) table has several records,
3581 * indexed by a separate xlat table, indexed in turn by the fp strap in
3582 * EXTDEV_BOOT. Each record has a config byte, followed by 6 script
3583 * numbers for use by INIT_SUB which controlled panel init and power,
3584 * and finally a dword of ms to sleep between power off and on
3585 * operations.
3586 *
3587 * In the BIT versions, the table following the header serves as an
3588 * integrated config and xlat table: the records in the table are
3589 * indexed by the FP strap nibble in EXTDEV_BOOT, and each record has
3590 * two bytes - the first as a config byte, the second for indexing the
3591 * fp mode table pointed to by the BIT 'D' table
3592 *
3593 * DDC is not used until after card init, so selecting the correct table
3594 * entry and setting the dual-link flag for EDID-equipped panels (which
3595 * requires testing against the native-mode pixel clock) cannot be done
3596 * until later, when this function should be called with a non-zero pxclk
3597 */
3598 struct drm_nouveau_private *dev_priv = dev->dev_private;
3599 struct nvbios *bios = &dev_priv->VBIOS;
3600 int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
3601 struct lvdstableheader lth;
3602 uint16_t lvdsofs;
3603 int ret, chip_version = bios->pub.chip_version;
3604
3605 ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
3606 if (ret)
3607 return ret;
3608
3609 switch (lth.lvds_ver) {
3610 case 0x0a: /* pre NV40 */
3611 lvdsmanufacturerindex = bios->data[
3612 bios->fp.fpxlatemanufacturertableptr +
3613 fpstrapping];
3614
3615 /* we're done if this isn't the EDID panel case */
3616 if (!pxclk)
3617 break;
3618
3619 if (chip_version < 0x25) {
3620 /* nv17 behaviour
3621 *
3622 * It seems the old style lvds script pointer is reused
3623 * to select 18/24 bit colour depth for EDID panels.
3624 */
3625 lvdsmanufacturerindex =
3626 (bios->legacy.lvds_single_a_script_ptr & 1) ?
3627 2 : 0;
3628 if (pxclk >= bios->fp.duallink_transition_clk)
3629 lvdsmanufacturerindex++;
3630 } else if (chip_version < 0x30) {
3631 /* nv28 behaviour (off-chip encoder)
3632 *
3633 * nv28 does a complex dance of first using byte 121 of
3634 * the EDID to choose the lvdsmanufacturerindex, then
3635 * later attempting to match the EDID manufacturer and
3636 * product IDs in a table (signature 'pidt' (panel id
3637 * table?)), setting an lvdsmanufacturerindex of 0 and
3638 * an fp strap of the match index (or 0xf if none)
3639 */
3640 lvdsmanufacturerindex = 0;
3641 } else {
3642 /* nv31, nv34 behaviour */
3643 lvdsmanufacturerindex = 0;
3644 if (pxclk >= bios->fp.duallink_transition_clk)
3645 lvdsmanufacturerindex = 2;
3646 if (pxclk >= 140000)
3647 lvdsmanufacturerindex = 3;
3648 }
3649
3650 /*
3651 * nvidia set the high nibble of (cr57=f, cr58) to
3652 * lvdsmanufacturerindex in this case; we don't
3653 */
3654 break;
3655 case 0x30: /* NV4x */
3656 case 0x40: /* G80/G90 */
3657 lvdsmanufacturerindex = fpstrapping;
3658 break;
3659 default:
3660 NV_ERROR(dev, "LVDS table revision not currently supported\n");
3661 return -ENOSYS;
3662 }
3663
3664 lvdsofs = bios->fp.xlated_entry = bios->fp.lvdsmanufacturerpointer + lth.headerlen + lth.recordlen * lvdsmanufacturerindex;
3665 switch (lth.lvds_ver) {
3666 case 0x0a:
3667 bios->fp.power_off_for_reset = bios->data[lvdsofs] & 1;
3668 bios->fp.reset_after_pclk_change = bios->data[lvdsofs] & 2;
3669 bios->fp.dual_link = bios->data[lvdsofs] & 4;
3670 bios->fp.link_c_increment = bios->data[lvdsofs] & 8;
3671 *if_is_24bit = bios->data[lvdsofs] & 16;
3672 break;
3673 case 0x30:
3674 /*
3675 * My money would be on there being a 24 bit interface bit in
3676 * this table, but I have no example of a laptop bios with a
3677 * 24 bit panel to confirm that. Hence we shout loudly if any
3678 * bit other than bit 0 is set (I've not even seen bit 1)
3679 */
3680 if (bios->data[lvdsofs] > 1)
3681 NV_ERROR(dev,
3682 "You have a very unusual laptop display; please report it\n");
3683 /*
3684 * No sign of the "power off for reset" or "reset for panel
3685 * on" bits, but it's safer to assume we should
3686 */
3687 bios->fp.power_off_for_reset = true;
3688 bios->fp.reset_after_pclk_change = true;
3689 /*
3690 * It's OK that lvdsofs is wrong for the nv4x EDID case; dual_link is
3691 * overwritten, and BITbit1 isn't used
3692 */
3693 bios->fp.dual_link = bios->data[lvdsofs] & 1;
3694 bios->fp.BITbit1 = bios->data[lvdsofs] & 2;
3695 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
3696 break;
3697 case 0x40:
3698 bios->fp.dual_link = bios->data[lvdsofs] & 1;
3699 bios->fp.if_is_24bit = bios->data[lvdsofs] & 2;
3700 bios->fp.strapless_is_24bit = bios->data[bios->fp.lvdsmanufacturerpointer + 4];
3701 bios->fp.duallink_transition_clk = ROM16(bios->data[bios->fp.lvdsmanufacturerpointer + 5]) * 10;
3702 break;
3703 }
3704
3705 /* set dual_link flag for EDID case */
3706 if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
3707 bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
3708
3709 *dl = bios->fp.dual_link;
3710
3711 return 0;
3712}
3713
3714static uint8_t *
3715bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
3716 uint16_t record, int record_len, int record_nr)
3717{
3718 struct drm_nouveau_private *dev_priv = dev->dev_private;
3719 struct nvbios *bios = &dev_priv->VBIOS;
3720 uint32_t entry;
3721 uint16_t table;
3722 int i, v;
3723
3724 for (i = 0; i < record_nr; i++, record += record_len) {
3725 table = ROM16(bios->data[record]);
3726 if (!table)
3727 continue;
3728 entry = ROM32(bios->data[table]);
3729
3730 v = (entry & 0x000f0000) >> 16;
3731 if (!(v & dcbent->or))
3732 continue;
3733
3734 v = (entry & 0x000000f0) >> 4;
3735 if (v != dcbent->location)
3736 continue;
3737
3738 v = (entry & 0x0000000f);
3739 if (v != dcbent->type)
3740 continue;
3741
3742 return &bios->data[table];
3743 }
3744
3745 return NULL;
3746}
3747
3748void *
3749nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
3750 int *length)
3751{
3752 struct drm_nouveau_private *dev_priv = dev->dev_private;
3753 struct nvbios *bios = &dev_priv->VBIOS;
3754 uint8_t *table;
3755
3756 if (!bios->display.dp_table_ptr) {
3757 NV_ERROR(dev, "No pointer to DisplayPort table\n");
3758 return NULL;
3759 }
3760 table = &bios->data[bios->display.dp_table_ptr];
3761
3762 if (table[0] != 0x21) {
3763 NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
3764 table[0]);
3765 return NULL;
3766 }
3767
3768 *length = table[4];
3769 return bios_output_config_match(dev, dcbent,
3770 bios->display.dp_table_ptr + table[1],
3771 table[2], table[3]);
3772}
3773
3774int
3775nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
3776 uint32_t sub, int pxclk)
3777{
3778 /*
3779 * The display script table is located by the BIT 'U' table.
3780 *
3781 * It contains an array of pointers to various tables describing
3782 * a particular output type. The first 32-bits of the output
3783 * tables contains similar information to a DCB entry, and is
3784 * used to decide whether that particular table is suitable for
3785 * the output you want to access.
3786 *
3787 * The "record header length" field here seems to indicate the
3788 * offset of the first configuration entry in the output tables.
3789 * This is 10 on most cards I've seen, but 12 has been witnessed
3790 * on DP cards, and there's another script pointer within the
3791 * header.
3792 *
3793 * offset + 0 ( 8 bits): version
3794 * offset + 1 ( 8 bits): header length
3795 * offset + 2 ( 8 bits): record length
3796 * offset + 3 ( 8 bits): number of records
3797 * offset + 4 ( 8 bits): record header length
3798 * offset + 5 (16 bits): pointer to first output script table
3799 */
3800
3801 struct drm_nouveau_private *dev_priv = dev->dev_private;
3802 struct init_exec iexec = {true, false};
3803 struct nvbios *bios = &dev_priv->VBIOS;
3804 uint8_t *table = &bios->data[bios->display.script_table_ptr];
3805 uint8_t *otable = NULL;
3806 uint16_t script;
3807 int i = 0;
3808
3809 if (!bios->display.script_table_ptr) {
3810 NV_ERROR(dev, "No pointer to output script table\n");
3811 return 1;
3812 }
3813
3814 /*
3815 * Nothing useful has been in any of the pre-2.0 tables I've seen,
3816 * so until they are, we really don't need to care.
3817 */
3818 if (table[0] < 0x20)
3819 return 1;
3820
3821 if (table[0] != 0x20 && table[0] != 0x21) {
3822 NV_ERROR(dev, "Output script table version 0x%02x unknown\n",
3823 table[0]);
3824 return 1;
3825 }
3826
3827 /*
3828 * The output script tables describing a particular output type
3829 * look as follows:
3830 *
3831 * offset + 0 (32 bits): output this table matches (hash of DCB)
3832 * offset + 4 ( 8 bits): unknown
3833 * offset + 5 ( 8 bits): number of configurations
3834 * offset + 6 (16 bits): pointer to some script
3835 * offset + 8 (16 bits): pointer to some script
3836 *
3837 * headerlen == 10
3838 * offset + 10 : configuration 0
3839 *
3840 * headerlen == 12
3841 * offset + 10 : pointer to some script
3842 * offset + 12 : configuration 0
3843 *
3844 * Each config entry is as follows:
3845 *
3846 * offset + 0 (16 bits): unknown, assumed to be a match value
3847 * offset + 2 (16 bits): pointer to script table (clock set?)
3848 * offset + 4 (16 bits): pointer to script table (reset?)
3849 *
3850 * There doesn't appear to be a count value to say how many
3851 * entries exist in each script table, instead, a 0 value in
3852 * the first 16-bit word seems to indicate both the end of the
3853 * list and the default entry. The second 16-bit word in the
3854 * script tables is a pointer to the script to execute.
3855 */
3856
3857 NV_DEBUG(dev, "Searching for output entry for %d %d %d\n",
3858 dcbent->type, dcbent->location, dcbent->or);
3859 otable = bios_output_config_match(dev, dcbent, table[1] +
3860 bios->display.script_table_ptr,
3861 table[2], table[3]);
3862 if (!otable) {
3863 NV_ERROR(dev, "Couldn't find matching output script table\n");
3864 return 1;
3865 }
3866
3867 if (pxclk < -2 || pxclk > 0) {
3868 /* Try to find matching script table entry */
3869 for (i = 0; i < otable[5]; i++) {
3870 if (ROM16(otable[table[4] + i*6]) == sub)
3871 break;
3872 }
3873
3874 if (i == otable[5]) {
3875 NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
3876 "using first\n",
3877 sub, dcbent->type, dcbent->or);
3878 i = 0;
3879 }
3880 }
3881
3882 bios->display.output = dcbent;
3883
3884 if (pxclk == 0) {
3885 script = ROM16(otable[6]);
3886 if (!script) {
3887 NV_DEBUG(dev, "output script 0 not found\n");
3888 return 1;
3889 }
3890
3891 NV_TRACE(dev, "0x%04X: parsing output script 0\n", script);
3892 parse_init_table(bios, script, &iexec);
3893 } else
3894 if (pxclk == -1) {
3895 script = ROM16(otable[8]);
3896 if (!script) {
3897 NV_DEBUG(dev, "output script 1 not found\n");
3898 return 1;
3899 }
3900
3901 NV_TRACE(dev, "0x%04X: parsing output script 1\n", script);
3902 parse_init_table(bios, script, &iexec);
3903 } else
3904 if (pxclk == -2) {
3905 if (table[4] >= 12)
3906 script = ROM16(otable[10]);
3907 else
3908 script = 0;
3909 if (!script) {
3910 NV_DEBUG(dev, "output script 2 not found\n");
3911 return 1;
3912 }
3913
3914 NV_TRACE(dev, "0x%04X: parsing output script 2\n", script);
3915 parse_init_table(bios, script, &iexec);
3916 } else
3917 if (pxclk > 0) {
3918 script = ROM16(otable[table[4] + i*6 + 2]);
3919 if (script)
3920 script = clkcmptable(bios, script, pxclk);
3921 if (!script) {
3922 NV_ERROR(dev, "clock script 0 not found\n");
3923 return 1;
3924 }
3925
3926 NV_TRACE(dev, "0x%04X: parsing clock script 0\n", script);
3927 parse_init_table(bios, script, &iexec);
3928 } else
3929 if (pxclk < 0) {
3930 script = ROM16(otable[table[4] + i*6 + 4]);
3931 if (script)
3932 script = clkcmptable(bios, script, -pxclk);
3933 if (!script) {
3934 NV_DEBUG(dev, "clock script 1 not found\n");
3935 return 1;
3936 }
3937
3938 NV_TRACE(dev, "0x%04X: parsing clock script 1\n", script);
3939 parse_init_table(bios, script, &iexec);
3940 }
3941
3942 return 0;
3943}
3944
3945
3946int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, int pxclk)
3947{
3948 /*
3949 * the pxclk parameter is in kHz
3950 *
3951 * This runs the TMDS regs setting code found on BIT bios cards
3952 *
3953 * For ffs(or) == 1 use the first table, for ffs(or) == 2 and
3954 * ffs(or) == 3, use the second.
3955 */
3956
3957 struct drm_nouveau_private *dev_priv = dev->dev_private;
3958 struct nvbios *bios = &dev_priv->VBIOS;
3959 int cv = bios->pub.chip_version;
3960 uint16_t clktable = 0, scriptptr;
3961 uint32_t sel_clk_binding, sel_clk;
3962
3963 /* pre-nv17 off-chip tmds uses scripts, post nv17 doesn't */
3964 if (cv >= 0x17 && cv != 0x1a && cv != 0x20 &&
3965 dcbent->location != DCB_LOC_ON_CHIP)
3966 return 0;
3967
3968 switch (ffs(dcbent->or)) {
3969 case 1:
3970 clktable = bios->tmds.output0_script_ptr;
3971 break;
3972 case 2:
3973 case 3:
3974 clktable = bios->tmds.output1_script_ptr;
3975 break;
3976 }
3977
3978 if (!clktable) {
3979 NV_ERROR(dev, "Pixel clock comparison table not found\n");
3980 return -EINVAL;
3981 }
3982
3983 scriptptr = clkcmptable(bios, clktable, pxclk);
3984
3985 if (!scriptptr) {
3986 NV_ERROR(dev, "TMDS output init script not found\n");
3987 return -ENOENT;
3988 }
3989
3990 /* don't let script change pll->head binding */
3991 sel_clk_binding = bios_rd32(bios, NV_PRAMDAC_SEL_CLK) & 0x50000;
3992 run_digital_op_script(dev, scriptptr, dcbent, head, pxclk >= 165000);
3993 sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK) & ~0x50000;
3994 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, sel_clk | sel_clk_binding);
3995
3996 return 0;
3997}
3998
3999int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
4000{
4001 /*
4002 * PLL limits table
4003 *
4004 * Version 0x10: NV30, NV31
4005 * One byte header (version), one record of 24 bytes
4006 * Version 0x11: NV36 - Not implemented
4007 * Seems to have same record style as 0x10, but 3 records rather than 1
4008 * Version 0x20: Found on Geforce 6 cards
4009 * Trivial 4 byte BIT header. 31 (0x1f) byte record length
4010 * Version 0x21: Found on Geforce 7, 8 and some Geforce 6 cards
4011 * 5 byte header, fifth byte of unknown purpose. 35 (0x23) byte record
4012 * length in general, some (integrated) have an extra configuration byte
4013 * Version 0x30: Found on Geforce 8, separates the register mapping
4014 * from the limits tables.
4015 */
4016
4017 struct drm_nouveau_private *dev_priv = dev->dev_private;
4018 struct nvbios *bios = &dev_priv->VBIOS;
4019 int cv = bios->pub.chip_version, pllindex = 0;
4020 uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
4021 uint32_t crystal_strap_mask, crystal_straps;
4022
4023 if (!bios->pll_limit_tbl_ptr) {
4024 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
4025 cv >= 0x40) {
4026 NV_ERROR(dev, "Pointer to PLL limits table invalid\n");
4027 return -EINVAL;
4028 }
4029 } else
4030 pll_lim_ver = bios->data[bios->pll_limit_tbl_ptr];
4031
4032 crystal_strap_mask = 1 << 6;
4033 /* open coded dev->twoHeads test */
4034 if (cv > 0x10 && cv != 0x15 && cv != 0x1a && cv != 0x20)
4035 crystal_strap_mask |= 1 << 22;
4036 crystal_straps = nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) &
4037 crystal_strap_mask;
4038
4039 switch (pll_lim_ver) {
4040 /*
4041 * We use version 0 to indicate a pre limit table bios (single stage
4042 * pll) and load the hard coded limits instead.
4043 */
4044 case 0:
4045 break;
4046 case 0x10:
4047 case 0x11:
4048 /*
4049 * Strictly v0x11 has 3 entries, but the last two don't seem
4050 * to get used.
4051 */
4052 headerlen = 1;
4053 recordlen = 0x18;
4054 entries = 1;
4055 pllindex = 0;
4056 break;
4057 case 0x20:
4058 case 0x21:
4059 case 0x30:
4060 case 0x40:
4061 headerlen = bios->data[bios->pll_limit_tbl_ptr + 1];
4062 recordlen = bios->data[bios->pll_limit_tbl_ptr + 2];
4063 entries = bios->data[bios->pll_limit_tbl_ptr + 3];
4064 break;
4065 default:
4066 NV_ERROR(dev, "PLL limits table revision 0x%X not currently "
4067 "supported\n", pll_lim_ver);
4068 return -ENOSYS;
4069 }
4070
4071 /* initialize all members to zero */
4072 memset(pll_lim, 0, sizeof(struct pll_lims));
4073
4074 if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
4075 uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
4076
4077 pll_lim->vco1.minfreq = ROM32(pll_rec[0]);
4078 pll_lim->vco1.maxfreq = ROM32(pll_rec[4]);
4079 pll_lim->vco2.minfreq = ROM32(pll_rec[8]);
4080 pll_lim->vco2.maxfreq = ROM32(pll_rec[12]);
4081 pll_lim->vco1.min_inputfreq = ROM32(pll_rec[16]);
4082 pll_lim->vco2.min_inputfreq = ROM32(pll_rec[20]);
4083 pll_lim->vco1.max_inputfreq = pll_lim->vco2.max_inputfreq = INT_MAX;
4084
4085 /* these values taken from nv30/31/36 */
4086 pll_lim->vco1.min_n = 0x1;
4087 if (cv == 0x36)
4088 pll_lim->vco1.min_n = 0x5;
4089 pll_lim->vco1.max_n = 0xff;
4090 pll_lim->vco1.min_m = 0x1;
4091 pll_lim->vco1.max_m = 0xd;
4092 pll_lim->vco2.min_n = 0x4;
4093 /*
4094 * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this
4095 * table version (apart from nv35)), N2 is compared to
4096 * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and
4097 * save a comparison
4098 */
4099 pll_lim->vco2.max_n = 0x28;
4100 if (cv == 0x30 || cv == 0x35)
4101 /* only 5 bits available for N2 on nv30/35 */
4102 pll_lim->vco2.max_n = 0x1f;
4103 pll_lim->vco2.min_m = 0x1;
4104 pll_lim->vco2.max_m = 0x4;
4105 pll_lim->max_log2p = 0x7;
4106 pll_lim->max_usable_log2p = 0x6;
4107 } else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
4108 uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
4109 uint32_t reg = 0; /* default match */
4110 uint8_t *pll_rec;
4111 int i;
4112
4113 /*
4114 * First entry is default match, if nothing better. warn if
4115 * reg field nonzero
4116 */
4117 if (ROM32(bios->data[plloffs]))
4118 NV_WARN(dev, "Default PLL limit entry has non-zero "
4119 "register field\n");
4120
4121 if (limit_match > MAX_PLL_TYPES)
4122 /* we've been passed a reg as the match */
4123 reg = limit_match;
4124 else /* limit match is a pll type */
4125 for (i = 1; i < entries && !reg; i++) {
4126 uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
4127
4128 if (limit_match == NVPLL &&
4129 (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
4130 reg = cmpreg;
4131 if (limit_match == MPLL &&
4132 (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
4133 reg = cmpreg;
4134 if (limit_match == VPLL1 &&
4135 (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
4136 reg = cmpreg;
4137 if (limit_match == VPLL2 &&
4138 (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
4139 reg = cmpreg;
4140 }
4141
4142 for (i = 1; i < entries; i++)
4143 if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
4144 pllindex = i;
4145 break;
4146 }
4147
4148 pll_rec = &bios->data[plloffs + recordlen * pllindex];
4149
4150 BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
4151 pllindex ? reg : 0);
4152
4153 /*
4154 * Frequencies are stored in the tables in MHz; kHz are more
4155 * useful, so we convert.
4156 */
4157
4158 /* What output frequencies can each VCO generate? */
4159 pll_lim->vco1.minfreq = ROM16(pll_rec[4]) * 1000;
4160 pll_lim->vco1.maxfreq = ROM16(pll_rec[6]) * 1000;
4161 pll_lim->vco2.minfreq = ROM16(pll_rec[8]) * 1000;
4162 pll_lim->vco2.maxfreq = ROM16(pll_rec[10]) * 1000;
4163
4164 /* What input frequencies do they accept (past the m-divider)? */
4165 pll_lim->vco1.min_inputfreq = ROM16(pll_rec[12]) * 1000;
4166 pll_lim->vco2.min_inputfreq = ROM16(pll_rec[14]) * 1000;
4167 pll_lim->vco1.max_inputfreq = ROM16(pll_rec[16]) * 1000;
4168 pll_lim->vco2.max_inputfreq = ROM16(pll_rec[18]) * 1000;
4169
4170 /* What values are accepted as multiplier and divider? */
4171 pll_lim->vco1.min_n = pll_rec[20];
4172 pll_lim->vco1.max_n = pll_rec[21];
4173 pll_lim->vco1.min_m = pll_rec[22];
4174 pll_lim->vco1.max_m = pll_rec[23];
4175 pll_lim->vco2.min_n = pll_rec[24];
4176 pll_lim->vco2.max_n = pll_rec[25];
4177 pll_lim->vco2.min_m = pll_rec[26];
4178 pll_lim->vco2.max_m = pll_rec[27];
4179
4180 pll_lim->max_usable_log2p = pll_lim->max_log2p = pll_rec[29];
4181 if (pll_lim->max_log2p > 0x7)
4182 /* pll decoding in nv_hw.c assumes never > 7 */
4183 NV_WARN(dev, "Max log2 P value greater than 7 (%d)\n",
4184 pll_lim->max_log2p);
4185 if (cv < 0x60)
4186 pll_lim->max_usable_log2p = 0x6;
4187 pll_lim->log2p_bias = pll_rec[30];
4188
4189 if (recordlen > 0x22)
4190 pll_lim->refclk = ROM32(pll_rec[31]);
4191
4192 if (recordlen > 0x23 && pll_rec[35])
4193 NV_WARN(dev,
4194 "Bits set in PLL configuration byte (%x)\n",
4195 pll_rec[35]);
4196
4197 /* C51 special not seen elsewhere */
4198 if (cv == 0x51 && !pll_lim->refclk) {
4199 uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
4200
4201 if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
4202 ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
4203 if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
4204 pll_lim->refclk = 200000;
4205 else
4206 pll_lim->refclk = 25000;
4207 }
4208 }
4209 } else if (pll_lim_ver == 0x30) { /* ver 0x30 */
4210 uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
4211 uint8_t *record = NULL;
4212 int i;
4213
4214 BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
4215 limit_match);
4216
4217 for (i = 0; i < entries; i++, entry += recordlen) {
4218 if (ROM32(entry[3]) == limit_match) {
4219 record = &bios->data[ROM16(entry[1])];
4220 break;
4221 }
4222 }
4223
4224 if (!record) {
4225			NV_ERROR(dev, "Register 0x%08x not found in PLL "
4226				 "limits table\n", limit_match);
4227 return -ENOENT;
4228 }
4229
4230 pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
4231 pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
4232 pll_lim->vco2.minfreq = ROM16(record[4]) * 1000;
4233 pll_lim->vco2.maxfreq = ROM16(record[6]) * 1000;
4234 pll_lim->vco1.min_inputfreq = ROM16(record[8]) * 1000;
4235 pll_lim->vco2.min_inputfreq = ROM16(record[10]) * 1000;
4236 pll_lim->vco1.max_inputfreq = ROM16(record[12]) * 1000;
4237 pll_lim->vco2.max_inputfreq = ROM16(record[14]) * 1000;
4238 pll_lim->vco1.min_n = record[16];
4239 pll_lim->vco1.max_n = record[17];
4240 pll_lim->vco1.min_m = record[18];
4241 pll_lim->vco1.max_m = record[19];
4242 pll_lim->vco2.min_n = record[20];
4243 pll_lim->vco2.max_n = record[21];
4244 pll_lim->vco2.min_m = record[22];
4245 pll_lim->vco2.max_m = record[23];
4246 pll_lim->max_usable_log2p = pll_lim->max_log2p = record[25];
4247 pll_lim->log2p_bias = record[27];
4248 pll_lim->refclk = ROM32(record[28]);
4249 } else if (pll_lim_ver) { /* ver 0x40 */
4250 uint8_t *entry = &bios->data[bios->pll_limit_tbl_ptr + headerlen];
4251 uint8_t *record = NULL;
4252 int i;
4253
4254 BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
4255 limit_match);
4256
4257 for (i = 0; i < entries; i++, entry += recordlen) {
4258 if (ROM32(entry[3]) == limit_match) {
4259 record = &bios->data[ROM16(entry[1])];
4260 break;
4261 }
4262 }
4263
4264 if (!record) {
4265			NV_ERROR(dev, "Register 0x%08x not found in PLL "
4266				 "limits table\n", limit_match);
4267 return -ENOENT;
4268 }
4269
4270 pll_lim->vco1.minfreq = ROM16(record[0]) * 1000;
4271 pll_lim->vco1.maxfreq = ROM16(record[2]) * 1000;
4272 pll_lim->vco1.min_inputfreq = ROM16(record[4]) * 1000;
4273 pll_lim->vco1.max_inputfreq = ROM16(record[6]) * 1000;
4274 pll_lim->vco1.min_m = record[8];
4275 pll_lim->vco1.max_m = record[9];
4276 pll_lim->vco1.min_n = record[10];
4277 pll_lim->vco1.max_n = record[11];
4278 pll_lim->min_p = record[12];
4279 pll_lim->max_p = record[13];
4280		/* no refclk field in this table version -- where did it go? */
4281 if (limit_match == 0x00614100 || limit_match == 0x00614900)
4282 pll_lim->refclk = 27000;
4283 else
4284 pll_lim->refclk = 100000;
4285 }
4286
4287 /*
4288 * By now any valid limit table ought to have set a max frequency for
4289	 * vco1, so if it's zero it's either a pre-limit-table BIOS, or one
4290 * with an empty limit table (seen on nv18)
4291 */
4292 if (!pll_lim->vco1.maxfreq) {
4293 pll_lim->vco1.minfreq = bios->fminvco;
4294 pll_lim->vco1.maxfreq = bios->fmaxvco;
4295 pll_lim->vco1.min_inputfreq = 0;
4296 pll_lim->vco1.max_inputfreq = INT_MAX;
4297 pll_lim->vco1.min_n = 0x1;
4298 pll_lim->vco1.max_n = 0xff;
4299 pll_lim->vco1.min_m = 0x1;
4300 if (crystal_straps == 0) {
4301 /* nv05 does this, nv11 doesn't, nv10 unknown */
4302 if (cv < 0x11)
4303 pll_lim->vco1.min_m = 0x7;
4304 pll_lim->vco1.max_m = 0xd;
4305 } else {
4306 if (cv < 0x11)
4307 pll_lim->vco1.min_m = 0x8;
4308 pll_lim->vco1.max_m = 0xe;
4309 }
4310 if (cv < 0x17 || cv == 0x1a || cv == 0x20)
4311 pll_lim->max_log2p = 4;
4312 else
4313 pll_lim->max_log2p = 5;
4314 pll_lim->max_usable_log2p = pll_lim->max_log2p;
4315 }
4316
4317 if (!pll_lim->refclk)
4318 switch (crystal_straps) {
4319 case 0:
4320 pll_lim->refclk = 13500;
4321 break;
4322 case (1 << 6):
4323 pll_lim->refclk = 14318;
4324 break;
4325 case (1 << 22):
4326 pll_lim->refclk = 27000;
4327 break;
4328 case (1 << 22 | 1 << 6):
4329 pll_lim->refclk = 25000;
4330 break;
4331 }
4332
4333#if 0 /* for easy debugging */
4334 ErrorF("pll.vco1.minfreq: %d\n", pll_lim->vco1.minfreq);
4335 ErrorF("pll.vco1.maxfreq: %d\n", pll_lim->vco1.maxfreq);
4336 ErrorF("pll.vco2.minfreq: %d\n", pll_lim->vco2.minfreq);
4337 ErrorF("pll.vco2.maxfreq: %d\n", pll_lim->vco2.maxfreq);
4338
4339 ErrorF("pll.vco1.min_inputfreq: %d\n", pll_lim->vco1.min_inputfreq);
4340 ErrorF("pll.vco1.max_inputfreq: %d\n", pll_lim->vco1.max_inputfreq);
4341 ErrorF("pll.vco2.min_inputfreq: %d\n", pll_lim->vco2.min_inputfreq);
4342 ErrorF("pll.vco2.max_inputfreq: %d\n", pll_lim->vco2.max_inputfreq);
4343
4344 ErrorF("pll.vco1.min_n: %d\n", pll_lim->vco1.min_n);
4345 ErrorF("pll.vco1.max_n: %d\n", pll_lim->vco1.max_n);
4346 ErrorF("pll.vco1.min_m: %d\n", pll_lim->vco1.min_m);
4347 ErrorF("pll.vco1.max_m: %d\n", pll_lim->vco1.max_m);
4348 ErrorF("pll.vco2.min_n: %d\n", pll_lim->vco2.min_n);
4349 ErrorF("pll.vco2.max_n: %d\n", pll_lim->vco2.max_n);
4350 ErrorF("pll.vco2.min_m: %d\n", pll_lim->vco2.min_m);
4351 ErrorF("pll.vco2.max_m: %d\n", pll_lim->vco2.max_m);
4352
4353 ErrorF("pll.max_log2p: %d\n", pll_lim->max_log2p);
4354 ErrorF("pll.log2p_bias: %d\n", pll_lim->log2p_bias);
4355
4356 ErrorF("pll.refclk: %d\n", pll_lim->refclk);
4357#endif
4358
4359 return 0;
4360}
4361
4362static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint16_t offset)
4363{
4364 /*
4365 * offset + 0 (8 bits): Micro version
4366 * offset + 1 (8 bits): Minor version
4367 * offset + 2 (8 bits): Chip version
4368 * offset + 3 (8 bits): Major version
4369 */
4370
4371 bios->major_version = bios->data[offset + 3];
4372 bios->pub.chip_version = bios->data[offset + 2];
4373 NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
4374 bios->data[offset + 3], bios->data[offset + 2],
4375 bios->data[offset + 1], bios->data[offset]);
4376}
4377
4378static void parse_script_table_pointers(struct nvbios *bios, uint16_t offset)
4379{
4380 /*
4381 * Parses the init table segment for pointers used in script execution.
4382 *
4383 * offset + 0 (16 bits): init script tables pointer
4384 * offset + 2 (16 bits): macro index table pointer
4385 * offset + 4 (16 bits): macro table pointer
4386 * offset + 6 (16 bits): condition table pointer
4387 * offset + 8 (16 bits): io condition table pointer
4388 * offset + 10 (16 bits): io flag condition table pointer
4389 * offset + 12 (16 bits): init function table pointer
4390 */
4391
4392 bios->init_script_tbls_ptr = ROM16(bios->data[offset]);
4393 bios->macro_index_tbl_ptr = ROM16(bios->data[offset + 2]);
4394 bios->macro_tbl_ptr = ROM16(bios->data[offset + 4]);
4395 bios->condition_tbl_ptr = ROM16(bios->data[offset + 6]);
4396 bios->io_condition_tbl_ptr = ROM16(bios->data[offset + 8]);
4397 bios->io_flag_condition_tbl_ptr = ROM16(bios->data[offset + 10]);
4398 bios->init_function_tbl_ptr = ROM16(bios->data[offset + 12]);
4399}
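
#if 0	/* illustration only -- a sketch, not part of the driver */
/*
 * All of the table parsing here leans on ROM16()/ROM32() to pull
 * little-endian words out of the BIOS image.  A minimal sketch of
 * equivalent byte-wise accessors (the driver's real macros are
 * expected to use the kernel's le16_to_cpu()/le32_to_cpu() helpers
 * on the in-image data):
 */
static inline uint16_t rom16_sketch(const uint8_t *p)
{
	return p[0] | (p[1] << 8);	/* BIOS data is little-endian */
}

static inline uint32_t rom32_sketch(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}
#endif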
4400
4401static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4402{
4403 /*
4404 * Parses the load detect values for g80 cards.
4405 *
4406 * offset + 0 (16 bits): loadval table pointer
4407 */
4408
4409 uint16_t load_table_ptr;
4410 uint8_t version, headerlen, entrylen, num_entries;
4411
4412 if (bitentry->length != 3) {
4413 NV_ERROR(dev, "Do not understand BIT A table\n");
4414 return -EINVAL;
4415 }
4416
4417 load_table_ptr = ROM16(bios->data[bitentry->offset]);
4418
4419 if (load_table_ptr == 0x0) {
4420 NV_ERROR(dev, "Pointer to BIT loadval table invalid\n");
4421 return -EINVAL;
4422 }
4423
4424 version = bios->data[load_table_ptr];
4425
4426 if (version != 0x10) {
4427 NV_ERROR(dev, "BIT loadval table version %d.%d not supported\n",
4428 version >> 4, version & 0xF);
4429 return -ENOSYS;
4430 }
4431
4432 headerlen = bios->data[load_table_ptr + 1];
4433 entrylen = bios->data[load_table_ptr + 2];
4434 num_entries = bios->data[load_table_ptr + 3];
4435
4436 if (headerlen != 4 || entrylen != 4 || num_entries != 2) {
4437 NV_ERROR(dev, "Do not understand BIT loadval table\n");
4438 return -EINVAL;
4439 }
4440
4441	/* First entry is the normal DAC; the second is perhaps TV-out? */
4442 bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
4443
4444 return 0;
4445}
4446
4447static int parse_bit_C_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4448{
4449 /*
4450 * offset + 8 (16 bits): PLL limits table pointer
4451 *
4452 * There's more in here, but that's unknown.
4453 */
4454
4455 if (bitentry->length < 10) {
4456 NV_ERROR(dev, "Do not understand BIT C table\n");
4457 return -EINVAL;
4458 }
4459
4460 bios->pll_limit_tbl_ptr = ROM16(bios->data[bitentry->offset + 8]);
4461
4462 return 0;
4463}
4464
4465static int parse_bit_display_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4466{
4467 /*
4468 * Parses the flat panel table segment that the bit entry points to.
4469 * Starting at bitentry->offset:
4470 *
4471	 * offset + 0 (16 bits): ??? table pointer - seems to have 18-byte
4472	 * records beginning with a frequency.
4473 * offset + 2 (16 bits): mode table pointer
4474 */
4475
4476 if (bitentry->length != 4) {
4477 NV_ERROR(dev, "Do not understand BIT display table\n");
4478 return -EINVAL;
4479 }
4480
4481 bios->fp.fptablepointer = ROM16(bios->data[bitentry->offset + 2]);
4482
4483 return 0;
4484}
4485
4486static int parse_bit_init_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4487{
4488 /*
4489 * Parses the init table segment that the bit entry points to.
4490 *
4491 * See parse_script_table_pointers for layout
4492 */
4493
4494 if (bitentry->length < 14) {
4495 NV_ERROR(dev, "Do not understand init table\n");
4496 return -EINVAL;
4497 }
4498
4499 parse_script_table_pointers(bios, bitentry->offset);
4500
4501 if (bitentry->length >= 16)
4502 bios->some_script_ptr = ROM16(bios->data[bitentry->offset + 14]);
4503 if (bitentry->length >= 18)
4504 bios->init96_tbl_ptr = ROM16(bios->data[bitentry->offset + 16]);
4505
4506 return 0;
4507}
4508
4509static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4510{
4511 /*
4512 * BIT 'i' (info?) table
4513 *
4514 * offset + 0 (32 bits): BIOS version dword (as in B table)
4515 * offset + 5 (8 bits): BIOS feature byte (same as for BMP?)
4516 * offset + 13 (16 bits): pointer to table containing DAC load
4517 * detection comparison values
4518 *
4519 * There's other things in the table, purpose unknown
4520 */
4521
4522 uint16_t daccmpoffset;
4523 uint8_t dacver, dacheaderlen;
4524
4525 if (bitentry->length < 6) {
4526 NV_ERROR(dev, "BIT i table too short for needed information\n");
4527 return -EINVAL;
4528 }
4529
4530 parse_bios_version(dev, bios, bitentry->offset);
4531
4532 /*
4533 * bit 4 seems to indicate a mobile bios (doesn't suffer from BMP's
4534 * Quadro identity crisis), other bits possibly as for BMP feature byte
4535 */
4536 bios->feature_byte = bios->data[bitentry->offset + 5];
4537 bios->is_mobile = bios->feature_byte & FEATURE_MOBILE;
4538
4539 if (bitentry->length < 15) {
4540 NV_WARN(dev, "BIT i table not long enough for DAC load "
4541 "detection comparison table\n");
4542 return -EINVAL;
4543 }
4544
4545 daccmpoffset = ROM16(bios->data[bitentry->offset + 13]);
4546
4547 /* doesn't exist on g80 */
4548 if (!daccmpoffset)
4549 return 0;
4550
4551 /*
4552 * The first value in the table, following the header, is the
4553	 * comparison value; the second entry is a comparison value for
4554 * TV load detection.
4555 */
4556
4557 dacver = bios->data[daccmpoffset];
4558 dacheaderlen = bios->data[daccmpoffset + 1];
4559
4560 if (dacver != 0x00 && dacver != 0x10) {
4561 NV_WARN(dev, "DAC load detection comparison table version "
4562 "%d.%d not known\n", dacver >> 4, dacver & 0xf);
4563 return -ENOSYS;
4564 }
4565
4566 bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
4567 bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
4568
4569 return 0;
4570}
4571
4572static int parse_bit_lvds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4573{
4574 /*
4575 * Parses the LVDS table segment that the bit entry points to.
4576 * Starting at bitentry->offset:
4577 *
4578 * offset + 0 (16 bits): LVDS strap xlate table pointer
4579 */
4580
4581 if (bitentry->length != 2) {
4582 NV_ERROR(dev, "Do not understand BIT LVDS table\n");
4583 return -EINVAL;
4584 }
4585
4586 /*
4587 * No idea if it's still called the LVDS manufacturer table, but
4588 * the concept's close enough.
4589 */
4590 bios->fp.lvdsmanufacturerpointer = ROM16(bios->data[bitentry->offset]);
4591
4592 return 0;
4593}
4594
4595static int
4596parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4597 struct bit_entry *bitentry)
4598{
4599 /*
4600 * offset + 2 (8 bits): number of options in an
4601 * INIT_RAM_RESTRICT_ZM_REG_GROUP opcode option set
4602 * offset + 3 (16 bits): pointer to strap xlate table for RAM
4603 * restrict option selection
4604 *
4605	 * There are a bunch of bits in this table other than the RAM restrict
4606	 * stuff that we don't use; their purpose is currently unknown
4607 */
4608
4609 uint16_t rr_strap_xlat;
4610 uint8_t rr_group_count;
4611 int i;
4612
4613 /*
4614 * Older bios versions don't have a sufficiently long table for
4615 * what we want
4616 */
4617 if (bitentry->length < 0x5)
4618 return 0;
4619
4620 if (bitentry->id[1] < 2) {
4621 rr_group_count = bios->data[bitentry->offset + 2];
4622 rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]);
4623 } else {
4624 rr_group_count = bios->data[bitentry->offset + 0];
4625 rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]);
4626 }
4627
4628 /* adjust length of INIT_87 */
4629 for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++);
4630 itbl_entry[i].length += rr_group_count * 4;
4631
4632 /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */
4633 for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++);
4634 itbl_entry[i].length_multiplier = rr_group_count * 4;
4635
4636 init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier;
4637 bios->ram_restrict_tbl_ptr = rr_strap_xlat;
4638
4639 return 0;
4640}
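
/*
 * Worked example of the fixups above, assuming rr_group_count == 4:
 * the INIT_87 opcode grows by 4 * 4 = 16 bytes (one extra dword per
 * RAM restrict option), and each register group in an
 * INIT_RAM_RESTRICT_ZM_REG_GROUP (0x8f) opcode becomes 16 bytes long
 * via length_multiplier.
 */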
4641
4642static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios, struct bit_entry *bitentry)
4643{
4644 /*
4645 * Parses the pointer to the TMDS table
4646 *
4647 * Starting at bitentry->offset:
4648 *
4649 * offset + 0 (16 bits): TMDS table pointer
4650 *
4651 * The TMDS table is typically found just before the DCB table, with a
4652 * characteristic signature of 0x11,0x13 (1.1 being version, 0x13 being
4653 * length?)
4654 *
4655	 * At offset +7 is a pointer to a script which we don't yet know how
4656	 * to run; at offset +9 is a pointer to another script, likewise
4657	 * not understood.
4658 * Offset +11 has a pointer to a table where the first word is a pxclk
4659 * frequency and the second word a pointer to a script, which should be
4660 * run if the comparison pxclk frequency is less than the pxclk desired.
4661 * This repeats for decreasing comparison frequencies
4662 * Offset +13 has a pointer to a similar table
4663 * The selection of table (and possibly +7/+9 script) is dictated by
4664 * "or" from the DCB.
4665 */
4666
4667 uint16_t tmdstableptr, script1, script2;
4668
4669 if (bitentry->length != 2) {
4670 NV_ERROR(dev, "Do not understand BIT TMDS table\n");
4671 return -EINVAL;
4672 }
4673
4674 tmdstableptr = ROM16(bios->data[bitentry->offset]);
4675
4676 if (tmdstableptr == 0x0) {
4677 NV_ERROR(dev, "Pointer to TMDS table invalid\n");
4678 return -EINVAL;
4679 }
4680
4681 /* nv50+ has v2.0, but we don't parse it atm */
4682 if (bios->data[tmdstableptr] != 0x11) {
4683 NV_WARN(dev,
4684 "TMDS table revision %d.%d not currently supported\n",
4685 bios->data[tmdstableptr] >> 4, bios->data[tmdstableptr] & 0xf);
4686 return -ENOSYS;
4687 }
4688
4689 /*
4690 * These two scripts are odd: they don't seem to get run even when
4691 * they are not stubbed.
4692 */
4693 script1 = ROM16(bios->data[tmdstableptr + 7]);
4694 script2 = ROM16(bios->data[tmdstableptr + 9]);
4695 if (bios->data[script1] != 'q' || bios->data[script2] != 'q')
4696 NV_WARN(dev, "TMDS table script pointers not stubbed\n");
4697
4698 bios->tmds.output0_script_ptr = ROM16(bios->data[tmdstableptr + 11]);
4699 bios->tmds.output1_script_ptr = ROM16(bios->data[tmdstableptr + 13]);
4700
4701 return 0;
4702}
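
#if 0	/* illustration only -- a sketch, not part of the driver */
/*
 * A minimal sketch of how the pxclk tables found above might be
 * walked, per the layout described in the function comment; the
 * helper name and the exact record size are assumptions for
 * illustration, not driver API:
 */
static uint16_t tmds_script_for_pxclk_sketch(struct nvbios *bios,
					     uint16_t table, int pxclk)
{
	/* records: 16-bit comparison frequency, 16-bit script pointer,
	 * stored in decreasing frequency order; 0 terminates */
	for (; ROM16(bios->data[table]); table += 4) {
		if (ROM16(bios->data[table]) < pxclk)
			return ROM16(bios->data[table + 2]);
	}
	return 0;	/* no matching script */
}
#endif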
4703
4704static int
4705parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4706 struct bit_entry *bitentry)
4707{
4708 /*
4709 * Parses the pointer to the G80 output script tables
4710 *
4711 * Starting at bitentry->offset:
4712 *
4713 * offset + 0 (16 bits): output script table pointer
4714 */
4715
4716 uint16_t outputscripttableptr;
4717
4718 if (bitentry->length != 3) {
4719 NV_ERROR(dev, "Do not understand BIT U table\n");
4720 return -EINVAL;
4721 }
4722
4723 outputscripttableptr = ROM16(bios->data[bitentry->offset]);
4724 bios->display.script_table_ptr = outputscripttableptr;
4725 return 0;
4726}
4727
4728static int
4729parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
4730 struct bit_entry *bitentry)
4731{
4732 bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
4733 return 0;
4734}
4735
4736struct bit_table {
4737 const char id;
4738 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
4739};
4740
4741#define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
4742
4743static int
4744parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
4745 struct bit_table *table)
4746{
4747 struct drm_device *dev = bios->dev;
4748 uint8_t maxentries = bios->data[bitoffset + 4];
4749 int i, offset;
4750 struct bit_entry bitentry;
4751
4752 for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
4753 bitentry.id[0] = bios->data[offset];
4754
4755 if (bitentry.id[0] != table->id)
4756 continue;
4757
4758 bitentry.id[1] = bios->data[offset + 1];
4759 bitentry.length = ROM16(bios->data[offset + 2]);
4760 bitentry.offset = ROM16(bios->data[offset + 4]);
4761
4762 return table->parse_fn(dev, bios, &bitentry);
4763 }
4764
4765 NV_INFO(dev, "BIT table '%c' not found\n", table->id);
4766 return -ENOSYS;
4767}
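
#if 0	/* illustration only -- a sketch, not part of the driver */
/*
 * Each BIT entry is a 6-byte record -- one-byte id, one-byte table
 * version, 16-bit length, 16-bit offset -- exactly as read above.
 * A debugging sketch that dumps every entry instead of stopping at
 * the first match:
 */
static void dump_bit_entries_sketch(struct nvbios *bios, uint16_t bitoffset)
{
	uint8_t maxentries = bios->data[bitoffset + 4];
	int i, offset = bitoffset + 6;

	for (i = 0; i < maxentries; i++, offset += 6)
		ErrorF("BIT '%c' v%d: len 0x%04x data at 0x%04x\n",
		       bios->data[offset], bios->data[offset + 1],
		       ROM16(bios->data[offset + 2]),
		       ROM16(bios->data[offset + 4]));
}
#endif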
4768
4769static int
4770parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
4771{
4772 int ret;
4773
4774 /*
4775 * The only restriction on parsing order currently is having 'i' first
4776 * for use of bios->*_version or bios->feature_byte while parsing;
4777	 * functions shouldn't actually be *doing* anything apart from pulling
4778 * data from the image into the bios struct, thus no interdependencies
4779 */
4780 ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('i', i));
4781 if (ret) /* info? */
4782 return ret;
4783 if (bios->major_version >= 0x60) /* g80+ */
4784 parse_bit_table(bios, bitoffset, &BIT_TABLE('A', A));
4785 ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('C', C));
4786 if (ret)
4787 return ret;
4788 parse_bit_table(bios, bitoffset, &BIT_TABLE('D', display));
4789 ret = parse_bit_table(bios, bitoffset, &BIT_TABLE('I', init));
4790 if (ret)
4791 return ret;
4792 parse_bit_table(bios, bitoffset, &BIT_TABLE('M', M)); /* memory? */
4793 parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
4794 parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
4795 parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
4796 parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
4797
4798 return 0;
4799}
4800
4801static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsigned int offset)
4802{
4803 /*
4804 * Parses the BMP structure for useful things, but does not act on them
4805 *
4806 * offset + 5: BMP major version
4807 * offset + 6: BMP minor version
4808 * offset + 9: BMP feature byte
4809 * offset + 10: BCD encoded BIOS version
4810 *
4811 * offset + 18: init script table pointer (for bios versions < 5.10h)
4812 * offset + 20: extra init script table pointer (for bios
4813 * versions < 5.10h)
4814 *
4815 * offset + 24: memory init table pointer (used on early bios versions)
4816 * offset + 26: SDR memory sequencing setup data table
4817 * offset + 28: DDR memory sequencing setup data table
4818 *
4819 * offset + 54: index of I2C CRTC pair to use for CRT output
4820 * offset + 55: index of I2C CRTC pair to use for TV output
4821 * offset + 56: index of I2C CRTC pair to use for flat panel output
4822 * offset + 58: write CRTC index for I2C pair 0
4823 * offset + 59: read CRTC index for I2C pair 0
4824 * offset + 60: write CRTC index for I2C pair 1
4825 * offset + 61: read CRTC index for I2C pair 1
4826 *
4827 * offset + 67: maximum internal PLL frequency (single stage PLL)
4828 * offset + 71: minimum internal PLL frequency (single stage PLL)
4829 *
4830 * offset + 75: script table pointers, as described in
4831 * parse_script_table_pointers
4832 *
4833 * offset + 89: TMDS single link output A table pointer
4834 * offset + 91: TMDS single link output B table pointer
4835 * offset + 95: LVDS single link output A table pointer
4836 * offset + 105: flat panel timings table pointer
4837 * offset + 107: flat panel strapping translation table pointer
4838 * offset + 117: LVDS manufacturer panel config table pointer
4839 * offset + 119: LVDS manufacturer strapping translation table pointer
4840 *
4841 * offset + 142: PLL limits table pointer
4842 *
4843 * offset + 156: minimum pixel clock for LVDS dual link
4844 */
4845
4846 uint8_t *bmp = &bios->data[offset], bmp_version_major, bmp_version_minor;
4847 uint16_t bmplength;
4848 uint16_t legacy_scripts_offset, legacy_i2c_offset;
4849
4850 /* load needed defaults in case we can't parse this info */
4851 bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
4852 bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
4853 bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
4854 bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
4855 bios->pub.digital_min_front_porch = 0x4b;
4856 bios->fmaxvco = 256000;
4857 bios->fminvco = 128000;
4858 bios->fp.duallink_transition_clk = 90000;
4859
4860 bmp_version_major = bmp[5];
4861 bmp_version_minor = bmp[6];
4862
4863 NV_TRACE(dev, "BMP version %d.%d\n",
4864 bmp_version_major, bmp_version_minor);
4865
4866 /*
4867 * Make sure that 0x36 is blank and can't be mistaken for a DCB
4868 * pointer on early versions
4869 */
4870 if (bmp_version_major < 5)
4871 *(uint16_t *)&bios->data[0x36] = 0;
4872
4873 /*
4874 * Seems that the minor version was 1 for all major versions prior
4875 * to 5. Version 6 could theoretically exist, but I suspect BIT
4876 * happened instead.
4877 */
4878 if ((bmp_version_major < 5 && bmp_version_minor != 1) || bmp_version_major > 5) {
4879 NV_ERROR(dev, "You have an unsupported BMP version. "
4880 "Please send in your bios\n");
4881 return -ENOSYS;
4882 }
4883
4884 if (bmp_version_major == 0)
4885 /* nothing that's currently useful in this version */
4886 return 0;
4887 else if (bmp_version_major == 1)
4888 bmplength = 44; /* exact for 1.01 */
4889 else if (bmp_version_major == 2)
4890 bmplength = 48; /* exact for 2.01 */
4891 else if (bmp_version_major == 3)
4892 bmplength = 54;
4893 /* guessed - mem init tables added in this version */
4894 else if (bmp_version_major == 4 || bmp_version_minor < 0x1)
4895 /* don't know if 5.0 exists... */
4896 bmplength = 62;
4897	/* guessed - BMP I2C indices added in version 4 */
4898 else if (bmp_version_minor < 0x6)
4899 bmplength = 67; /* exact for 5.01 */
4900 else if (bmp_version_minor < 0x10)
4901 bmplength = 75; /* exact for 5.06 */
4902 else if (bmp_version_minor == 0x10)
4903 bmplength = 89; /* exact for 5.10h */
4904 else if (bmp_version_minor < 0x14)
4905 bmplength = 118; /* exact for 5.11h */
4906 else if (bmp_version_minor < 0x24)
4907 /*
4908 * Not sure of version where pll limits came in;
4909 * certainly exist by 0x24 though.
4910 */
4911 /* length not exact: this is long enough to get lvds members */
4912 bmplength = 123;
4913 else if (bmp_version_minor < 0x27)
4914 /*
4915 * Length not exact: this is long enough to get pll limit
4916 * member
4917 */
4918 bmplength = 144;
4919 else
4920 /*
4921 * Length not exact: this is long enough to get dual link
4922 * transition clock.
4923 */
4924 bmplength = 158;
4925
4926 /* checksum */
4927 if (nv_cksum(bmp, 8)) {
4928 NV_ERROR(dev, "Bad BMP checksum\n");
4929 return -EINVAL;
4930 }
4931
4932 /*
4933 * Bit 4 seems to indicate either a mobile bios or a quadro card --
4934	 * mobile behaviour is consistent (nv11+), quadro only seen on
4935	 * nv18gl-nv36gl (not nv10gl); bit 5 indicates that the flat panel
4936	 * tables are present, and bit 6 a tv bios.
4937 */
4938 bios->feature_byte = bmp[9];
4939
4940 parse_bios_version(dev, bios, offset + 10);
4941
4942 if (bmp_version_major < 5 || bmp_version_minor < 0x10)
4943 bios->old_style_init = true;
4944 legacy_scripts_offset = 18;
4945 if (bmp_version_major < 2)
4946 legacy_scripts_offset -= 4;
4947 bios->init_script_tbls_ptr = ROM16(bmp[legacy_scripts_offset]);
4948 bios->extra_init_script_tbl_ptr = ROM16(bmp[legacy_scripts_offset + 2]);
4949
4950 if (bmp_version_major > 2) { /* appears in BMP 3 */
4951 bios->legacy.mem_init_tbl_ptr = ROM16(bmp[24]);
4952 bios->legacy.sdr_seq_tbl_ptr = ROM16(bmp[26]);
4953 bios->legacy.ddr_seq_tbl_ptr = ROM16(bmp[28]);
4954 }
4955
4956 legacy_i2c_offset = 0x48; /* BMP version 2 & 3 */
4957 if (bmplength > 61)
4958 legacy_i2c_offset = offset + 54;
4959 bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
4960 bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
4961 bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
4962 bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
4963 bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
4964 bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
4965 bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
4966
4967 if (bmplength > 74) {
4968 bios->fmaxvco = ROM32(bmp[67]);
4969 bios->fminvco = ROM32(bmp[71]);
4970 }
4971 if (bmplength > 88)
4972 parse_script_table_pointers(bios, offset + 75);
4973 if (bmplength > 94) {
4974 bios->tmds.output0_script_ptr = ROM16(bmp[89]);
4975 bios->tmds.output1_script_ptr = ROM16(bmp[91]);
4976 /*
4977 * Never observed in use with lvds scripts, but is reused for
4978 * 18/24 bit panel interface default for EDID equipped panels
4979 * (if_is_24bit not set directly to avoid any oscillation).
4980 */
4981 bios->legacy.lvds_single_a_script_ptr = ROM16(bmp[95]);
4982 }
4983 if (bmplength > 108) {
4984 bios->fp.fptablepointer = ROM16(bmp[105]);
4985 bios->fp.fpxlatetableptr = ROM16(bmp[107]);
4986 bios->fp.xlatwidth = 1;
4987 }
4988 if (bmplength > 120) {
4989 bios->fp.lvdsmanufacturerpointer = ROM16(bmp[117]);
4990 bios->fp.fpxlatemanufacturertableptr = ROM16(bmp[119]);
4991 }
4992 if (bmplength > 143)
4993 bios->pll_limit_tbl_ptr = ROM16(bmp[142]);
4994
4995 if (bmplength > 157)
4996 bios->fp.duallink_transition_clk = ROM16(bmp[156]) * 10;
4997
4998 return 0;
4999}
5000
5001static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
5002{
5003 int i, j;
5004
5005 for (i = 0; i <= (n - len); i++) {
5006 for (j = 0; j < len; j++)
5007 if (data[i + j] != str[j])
5008 break;
5009 if (j == len)
5010 return i;
5011 }
5012
5013 return 0;
5014}
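
#if 0	/* illustration only -- a sketch, not part of the driver */
/*
 * findstr() returns the offset of the first match, or 0 when the
 * needle is absent; callers rely on offset 0 never being a valid hit.
 * Example, locating the BMP signature as done further below:
 */
static int find_bmp_offset_sketch(struct nvbios *bios)
{
	const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };

	return findstr(bios->data, bios->length,
		       bmp_signature, sizeof(bmp_signature));
}
#endif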
5015
5016static int
5017read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
5018{
5019 uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
5020 int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
5021 int recordoffset = 0, rdofs = 1, wrofs = 0;
5022 uint8_t port_type = 0;
5023
5024 if (!i2ctable)
5025 return -EINVAL;
5026
5027 if (dcb_version >= 0x30) {
5028 if (i2ctable[0] != dcb_version) /* necessary? */
5029 NV_WARN(dev,
5030 "DCB I2C table version mismatch (%02X vs %02X)\n",
5031 i2ctable[0], dcb_version);
5032 dcb_i2c_ver = i2ctable[0];
5033 headerlen = i2ctable[1];
5034 if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
5035 i2c_entries = i2ctable[2];
5036 else
5037 NV_WARN(dev,
5038 "DCB I2C table has more entries than indexable "
5039 "(%d entries, max index 15)\n", i2ctable[2]);
5040 entry_len = i2ctable[3];
5041 /* [4] is i2c_default_indices, read in parse_dcb_table() */
5042 }
5043 /*
5044 * It's your own fault if you call this function on a DCB 1.1 BIOS --
5045 * the test below is for DCB 1.2
5046 */
5047 if (dcb_version < 0x14) {
5048 recordoffset = 2;
5049 rdofs = 0;
5050 wrofs = 1;
5051 }
5052
5053 if (index == 0xf)
5054 return 0;
5055	if (index >= i2c_entries) {
5056		NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
5057 index, i2ctable[2]);
5058 return -ENOENT;
5059 }
5060 if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
5061 NV_ERROR(dev, "DCB I2C entry invalid\n");
5062 return -EINVAL;
5063 }
5064
5065 if (dcb_i2c_ver >= 0x30) {
5066 port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
5067
5068 /*
5069 * Fixup for chips using same address offset for read and
5070 * write.
5071 */
5072 if (port_type == 4) /* seen on C51 */
5073 rdofs = wrofs = 1;
5074 if (port_type >= 5) /* G80+ */
5075 rdofs = wrofs = 0;
5076 }
5077
5078 if (dcb_i2c_ver >= 0x40 && port_type != 5 && port_type != 6)
5079 NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
5080
5081 i2c->port_type = port_type;
5082 i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
5083 i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
5084
5085 return 0;
5086}
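
/*
 * Worked example of the indexing above for a DCB 3.0+ i2c table
 * (recordoffset 0, rdofs 1, wrofs 0, entry_len typically 4): entry
 * n's write port lives at i2ctable[headerlen + entry_len * n], its
 * read port one byte later, and its port type three bytes in.
 */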
5087
5088static struct dcb_gpio_entry *
5089new_gpio_entry(struct nvbios *bios)
5090{
5091 struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
5092
5093 return &gpio->entry[gpio->entries++];
5094}
5095
5096struct dcb_gpio_entry *
5097nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
5098{
5099 struct drm_nouveau_private *dev_priv = dev->dev_private;
5100 struct nvbios *bios = &dev_priv->VBIOS;
5101 int i;
5102
5103 for (i = 0; i < bios->bdcb.gpio.entries; i++) {
5104 if (bios->bdcb.gpio.entry[i].tag != tag)
5105 continue;
5106
5107 return &bios->bdcb.gpio.entry[i];
5108 }
5109
5110 return NULL;
5111}
5112
5113static void
5114parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
5115{
5116 struct dcb_gpio_entry *gpio;
5117 uint16_t ent = ROM16(bios->data[offset]);
5118 uint8_t line = ent & 0x1f,
5119 tag = ent >> 5 & 0x3f,
5120 flags = ent >> 11 & 0x1f;
5121
5122 if (tag == 0x3f)
5123 return;
5124
5125 gpio = new_gpio_entry(bios);
5126
5127 gpio->tag = tag;
5128 gpio->line = line;
5129 gpio->invert = flags != 4;
5130}
5131
5132static void
5133parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
5134{
5135 struct dcb_gpio_entry *gpio;
5136 uint32_t ent = ROM32(bios->data[offset]);
5137 uint8_t line = ent & 0x1f,
5138 tag = ent >> 8 & 0xff;
5139
5140 if (tag == 0xff)
5141 return;
5142
5143 gpio = new_gpio_entry(bios);
5144
5145	/* Currently unused; we may need more fields parsed at some
5146	 * point. */
5147 gpio->tag = tag;
5148 gpio->line = line;
5149}
5150
5151static void
5152parse_dcb_gpio_table(struct nvbios *bios)
5153{
5154 struct drm_device *dev = bios->dev;
5155 uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
5156 uint8_t *gpio_table = &bios->data[gpio_table_ptr];
5157 int header_len = gpio_table[1],
5158 entries = gpio_table[2],
5159 entry_len = gpio_table[3];
5160 void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
5161 int i;
5162
5163 if (bios->bdcb.version >= 0x40) {
5164 if (gpio_table_ptr && entry_len != 4) {
5165 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
5166 return;
5167 }
5168
5169 parse_entry = parse_dcb40_gpio_entry;
5170
5171 } else if (bios->bdcb.version >= 0x30) {
5172 if (gpio_table_ptr && entry_len != 2) {
5173 NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
5174 return;
5175 }
5176
5177 parse_entry = parse_dcb30_gpio_entry;
5178
5179 } else if (bios->bdcb.version >= 0x22) {
5180 /*
5181 * DCBs older than v3.0 don't really have a GPIO
5182		 * table; instead they keep some GPIO info at fixed
5183 * locations.
5184 */
5185 uint16_t dcbptr = ROM16(bios->data[0x36]);
5186 uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
5187
5188 if (tvdac_gpio[0] & 1) {
5189 struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
5190
5191 gpio->tag = DCB_GPIO_TVDAC0;
5192 gpio->line = tvdac_gpio[1] >> 4;
5193 gpio->invert = tvdac_gpio[0] & 2;
5194 }
5195 }
5196
5197 if (!gpio_table_ptr)
5198 return;
5199
5200 if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
5201 NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
5202 entries = DCB_MAX_NUM_GPIO_ENTRIES;
5203 }
5204
5205 for (i = 0; i < entries; i++)
5206 parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
5207}
5208
5209struct dcb_connector_table_entry *
5210nouveau_bios_connector_entry(struct drm_device *dev, int index)
5211{
5212 struct drm_nouveau_private *dev_priv = dev->dev_private;
5213 struct nvbios *bios = &dev_priv->VBIOS;
5214 struct dcb_connector_table_entry *cte;
5215
5216 if (index >= bios->bdcb.connector.entries)
5217 return NULL;
5218
5219 cte = &bios->bdcb.connector.entry[index];
5220 if (cte->type == 0xff)
5221 return NULL;
5222
5223 return cte;
5224}
5225
5226static void
5227parse_dcb_connector_table(struct nvbios *bios)
5228{
5229 struct drm_device *dev = bios->dev;
5230 struct dcb_connector_table *ct = &bios->bdcb.connector;
5231 struct dcb_connector_table_entry *cte;
5232 uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
5233 uint8_t *entry;
5234 int i;
5235
5236 if (!bios->bdcb.connector_table_ptr) {
5237 NV_DEBUG(dev, "No DCB connector table present\n");
5238 return;
5239 }
5240
5241 NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
5242 conntab[0], conntab[1], conntab[2], conntab[3]);
5243 if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
5244 (conntab[3] != 2 && conntab[3] != 4)) {
5245 NV_ERROR(dev, " Unknown! Please report.\n");
5246 return;
5247 }
5248
5249 ct->entries = conntab[2];
5250
5251 entry = conntab + conntab[1];
5252 cte = &ct->entry[0];
5253 for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
5254 if (conntab[3] == 2)
5255 cte->entry = ROM16(entry[0]);
5256 else
5257 cte->entry = ROM32(entry[0]);
5258 cte->type = (cte->entry & 0x000000ff) >> 0;
5259 cte->index = (cte->entry & 0x00000f00) >> 8;
5260 switch (cte->entry & 0x00033000) {
5261 case 0x00001000:
5262 cte->gpio_tag = 0x07;
5263 break;
5264 case 0x00002000:
5265 cte->gpio_tag = 0x08;
5266 break;
5267 case 0x00010000:
5268 cte->gpio_tag = 0x51;
5269 break;
5270 case 0x00020000:
5271 cte->gpio_tag = 0x52;
5272 break;
5273 default:
5274 cte->gpio_tag = 0xff;
5275 break;
5276 }
5277
5278 if (cte->type == 0xff)
5279 continue;
5280
5281 NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
5282 i, cte->entry, cte->type, cte->index, cte->gpio_tag);
5283 }
5284}
5285
5286static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
5287{
5288 struct dcb_entry *entry = &dcb->entry[dcb->entries];
5289
5290 memset(entry, 0, sizeof(struct dcb_entry));
5291 entry->index = dcb->entries++;
5292
5293 return entry;
5294}
5295
5296static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
5297{
5298 struct dcb_entry *entry = new_dcb_entry(dcb);
5299
5300 entry->type = 0;
5301 entry->i2c_index = i2c;
5302 entry->heads = heads;
5303 entry->location = DCB_LOC_ON_CHIP;
5304 /* "or" mostly unused in early gen crt modesetting, 0 is fine */
5305}
5306
5307static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
5308{
5309 struct dcb_entry *entry = new_dcb_entry(dcb);
5310
5311 entry->type = 2;
5312 entry->i2c_index = LEGACY_I2C_PANEL;
5313 entry->heads = twoHeads ? 3 : 1;
5314 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
5315 entry->or = 1; /* means |0x10 gets set on CRE_LCD__INDEX */
5316 entry->duallink_possible = false; /* SiI164 and co. are single link */
5317
5318#if 0
5319 /*
5320 * For dvi-a either crtc probably works, but my card appears to only
5321 * support dvi-d. "nvidia" still attempts to program it for dvi-a,
5322 * doing the full fp output setup (program 0x6808.. fp dimension regs,
5323 * setting 0x680848 to 0x10000111 to enable, maybe setting 0x680880);
5324 * the monitor picks up the mode res ok and lights up, but no pixel
5325 * data appears, so the board manufacturer probably connected up the
5326 * sync lines, but missed the video traces / components
5327 *
5328 * with this introduction, dvi-a left as an exercise for the reader.
5329 */
5330 fabricate_vga_output(dcb, LEGACY_I2C_PANEL, entry->heads);
5331#endif
5332}
5333
5334static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
5335{
5336 struct dcb_entry *entry = new_dcb_entry(dcb);
5337
5338 entry->type = 1;
5339 entry->i2c_index = LEGACY_I2C_TV;
5340 entry->heads = twoHeads ? 3 : 1;
5341 entry->location = !DCB_LOC_ON_CHIP; /* ie OFF CHIP */
5342}
5343
5344static bool
5345parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5346 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5347{
5348 entry->type = conn & 0xf;
5349 entry->i2c_index = (conn >> 4) & 0xf;
5350 entry->heads = (conn >> 8) & 0xf;
5351 if (bdcb->version >= 0x40)
5352 entry->connector = (conn >> 12) & 0xf;
5353 entry->bus = (conn >> 16) & 0xf;
5354 entry->location = (conn >> 20) & 0x3;
5355 entry->or = (conn >> 24) & 0xf;
5356 /*
5357 * Normal entries consist of a single bit, but dual link has the
5358 * next most significant bit set too
5359 */
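	/*
	 * Worked example: or == 3 gives (1 << 0) * 3 == 3, so dual link is
	 * possible; or == 4 gives (1 << 2) * 3 == 12 != 4, single link only.
	 */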
5360 entry->duallink_possible =
5361 ((1 << (ffs(entry->or) - 1)) * 3 == entry->or);
5362
5363 switch (entry->type) {
5364 case OUTPUT_ANALOG:
5365 /*
5366 * Although the rest of a CRT conf dword is usually
5367		 * zeros, Mac BIOSes have stuff there, so we must mask
5368 */
5369 entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
5370 (conf & 0xffff) * 10 :
5371 (conf & 0xff) * 10000;
5372 break;
5373 case OUTPUT_LVDS:
5374 {
5375 uint32_t mask;
5376 if (conf & 0x1)
5377 entry->lvdsconf.use_straps_for_mode = true;
5378 if (bdcb->version < 0x22) {
5379 mask = ~0xd;
5380 /*
5381			 * The laptop in bug 14567 lies and claims not to use
5382 * straps when it does, so assume all DCB 2.0 laptops
5383 * use straps, until a broken EDID using one is produced
5384 */
5385 entry->lvdsconf.use_straps_for_mode = true;
5386 /*
5387 * Both 0x4 and 0x8 show up in v2.0 tables; assume they
5388 * mean the same thing (probably wrong, but might work)
5389 */
5390 if (conf & 0x4 || conf & 0x8)
5391 entry->lvdsconf.use_power_scripts = true;
5392 } else {
5393 mask = ~0x5;
5394 if (conf & 0x4)
5395 entry->lvdsconf.use_power_scripts = true;
5396 }
5397 if (conf & mask) {
5398 /*
5399 * Until we even try to use these on G8x, it's
5400 * useless reporting unknown bits. They all are.
5401 */
5402 if (bdcb->version >= 0x40)
5403 break;
5404
5405 NV_ERROR(dev, "Unknown LVDS configuration bits, "
5406 "please report\n");
5407 }
5408 break;
5409 }
5410 case OUTPUT_TV:
5411 {
5412 if (bdcb->version >= 0x30)
5413 entry->tvconf.has_component_output = conf & (0x8 << 4);
5414 else
5415 entry->tvconf.has_component_output = false;
5416
5417 break;
5418 }
5419 case OUTPUT_DP:
5420 entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
5421 entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
5422 switch ((conf & 0x0f000000) >> 24) {
5423 case 0xf:
5424 entry->dpconf.link_nr = 4;
5425 break;
5426 case 0x3:
5427 entry->dpconf.link_nr = 2;
5428 break;
5429 default:
5430 entry->dpconf.link_nr = 1;
5431 break;
5432 }
5433 break;
5434 case OUTPUT_TMDS:
5435 entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
5436 break;
5437 case 0xe:
5438 /* weird g80 mobile type that "nv" treats as a terminator */
5439 bdcb->dcb.entries--;
5440 return false;
5441 }
5442
5443 /* unsure what DCB version introduces this, 3.0? */
5444 if (conf & 0x100000)
5445 entry->i2c_upper_default = true;
5446
5447 return true;
5448}
5449
5450static bool
5451parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
5452 uint32_t conn, uint32_t conf, struct dcb_entry *entry)
5453{
5454 if (conn != 0xf0003f00 && conn != 0xf2247f10 && conn != 0xf2204001 &&
5455 conn != 0xf2204301 && conn != 0xf2204311 && conn != 0xf2208001 &&
5456 conn != 0xf2244001 && conn != 0xf2244301 && conn != 0xf2244311 &&
5457 conn != 0xf4204011 && conn != 0xf4208011 && conn != 0xf4248011 &&
5458 conn != 0xf2045ff2 && conn != 0xf2045f14 && conn != 0xf207df14 &&
5459 conn != 0xf2205004 && conn != 0xf2209004) {
5460 NV_ERROR(dev, "Unknown DCB 1.5 entry, please report\n");
5461
5462 /* cause output setting to fail for !TV, so message is seen */
5463 if ((conn & 0xf) != 0x1)
5464 dcb->entries = 0;
5465
5466 return false;
5467 }
5468 /* most of the below is a "best guess" atm */
5469 entry->type = conn & 0xf;
5470 if (entry->type == 2)
5471 /* another way of specifying straps based lvds... */
5472 entry->type = OUTPUT_LVDS;
5473 if (entry->type == 4) { /* digital */
5474 if (conn & 0x10)
5475 entry->type = OUTPUT_LVDS;
5476 else
5477 entry->type = OUTPUT_TMDS;
5478 }
5479 /* what's in bits 5-13? could be some encoder maker thing, in tv case */
5480 entry->i2c_index = (conn >> 14) & 0xf;
5481 /* raw heads field is in range 0-1, so move to 1-2 */
5482 entry->heads = ((conn >> 18) & 0x7) + 1;
5483 entry->location = (conn >> 21) & 0xf;
5484 /* unused: entry->bus = (conn >> 25) & 0x7; */
5485 /* set or to be same as heads -- hopefully safe enough */
5486 entry->or = entry->heads;
5487 entry->duallink_possible = false;
5488
5489 switch (entry->type) {
5490 case OUTPUT_ANALOG:
5491 entry->crtconf.maxfreq = (conf & 0xffff) * 10;
5492 break;
5493 case OUTPUT_LVDS:
5494 /*
5495 * This is probably buried in conn's unknown bits.
5496 * This will upset EDID-ful models, if they exist
5497 */
5498 entry->lvdsconf.use_straps_for_mode = true;
5499 entry->lvdsconf.use_power_scripts = true;
5500 break;
5501 case OUTPUT_TMDS:
5502 /*
5503 * Invent a DVI-A output, by copying the fields of the DVI-D
5504 * output; reported to work by math_b on an NV20(!).
5505 */
5506 fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
5507 break;
5508 case OUTPUT_TV:
5509 entry->tvconf.has_component_output = false;
5510 break;
5511 }
5512
5513 return true;
5514}
5515
5516static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
5517 uint32_t conn, uint32_t conf)
5518{
5519 struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
5520 bool ret;
5521
5522 if (bdcb->version >= 0x20)
5523 ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
5524 else
5525 ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
5526 if (!ret)
5527 return ret;
5528
5529 read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
5530 entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
5531
5532 return true;
5533}
5534
5535static
5536void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
5537{
5538 /*
5539 * DCB v2.0 lists each output combination separately.
5540	 * Here we merge compatible entries to end up with fewer outputs,
5541	 * each with more options
5542 */
5543
5544 int i, newentries = 0;
5545
5546 for (i = 0; i < dcb->entries; i++) {
5547 struct dcb_entry *ient = &dcb->entry[i];
5548 int j;
5549
5550 for (j = i + 1; j < dcb->entries; j++) {
5551 struct dcb_entry *jent = &dcb->entry[j];
5552
5553 if (jent->type == 100) /* already merged entry */
5554 continue;
5555
5556 /* merge heads field when all other fields the same */
5557 if (jent->i2c_index == ient->i2c_index &&
5558 jent->type == ient->type &&
5559 jent->location == ient->location &&
5560 jent->or == ient->or) {
5561 NV_TRACE(dev, "Merging DCB entries %d and %d\n",
5562 i, j);
5563 ient->heads |= jent->heads;
5564 jent->type = 100; /* dummy value */
5565 }
5566 }
5567 }
5568
5569 /* Compact entries merged into others out of dcb */
5570 for (i = 0; i < dcb->entries; i++) {
5571 if (dcb->entry[i].type == 100)
5572 continue;
5573
5574 if (newentries != i) {
5575 dcb->entry[newentries] = dcb->entry[i];
5576 dcb->entry[newentries].index = newentries;
5577 }
5578 newentries++;
5579 }
5580
5581 dcb->entries = newentries;
5582}
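
/*
 * Worked example of the merge above: two v2.0 entries that differ only
 * in heads, say { type TMDS, i2c 1, or 1, heads 1 } and { type TMDS,
 * i2c 1, or 1, heads 2 }, collapse into one entry with heads == 3
 * (either head usable), and the table is then compacted so that the
 * remaining indices stay contiguous.
 */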
5583
5584static int parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
5585{
5586 struct bios_parsed_dcb *bdcb = &bios->bdcb;
5587 struct parsed_dcb *dcb;
5588 uint16_t dcbptr, i2ctabptr = 0;
5589 uint8_t *dcbtable;
5590 uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
5591 bool configblock = true;
5592 int recordlength = 8, confofs = 4;
5593 int i;
5594
5595 dcb = bios->pub.dcb = &bdcb->dcb;
5596 dcb->entries = 0;
5597
5598 /* get the offset from 0x36 */
5599 dcbptr = ROM16(bios->data[0x36]);
5600
5601 if (dcbptr == 0x0) {
5602 NV_WARN(dev, "No output data (DCB) found in BIOS, "
5603 "assuming a CRT output exists\n");
5604 /* this situation likely means a really old card, pre DCB */
5605 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5606
5607 if (nv04_tv_identify(dev,
5608 bios->legacy.i2c_indices.tv) >= 0)
5609 fabricate_tv_output(dcb, twoHeads);
5610
5611 return 0;
5612 }
5613
5614 dcbtable = &bios->data[dcbptr];
5615
5616 /* get DCB version */
5617 bdcb->version = dcbtable[0];
5618 NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
5619 bdcb->version >> 4, bdcb->version & 0xf);
5620
5621 if (bdcb->version >= 0x20) { /* NV17+ */
5622 uint32_t sig;
5623
5624 if (bdcb->version >= 0x30) { /* NV40+ */
5625 headerlen = dcbtable[1];
5626 entries = dcbtable[2];
5627 recordlength = dcbtable[3];
5628 i2ctabptr = ROM16(dcbtable[4]);
5629 sig = ROM32(dcbtable[6]);
5630 bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
5631 bdcb->connector_table_ptr = ROM16(dcbtable[20]);
5632 } else {
5633 i2ctabptr = ROM16(dcbtable[2]);
5634 sig = ROM32(dcbtable[4]);
5635 headerlen = 8;
5636 }
5637
5638 if (sig != 0x4edcbdcb) {
5639 NV_ERROR(dev, "Bad Display Configuration Block "
5640 "signature (%08X)\n", sig);
5641 return -EINVAL;
5642 }
5643 } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
5644 char sig[8] = { 0 };
5645
5646 strncpy(sig, (char *)&dcbtable[-7], 7);
5647 i2ctabptr = ROM16(dcbtable[2]);
5648 recordlength = 10;
5649 confofs = 6;
5650
5651 if (strcmp(sig, "DEV_REC")) {
5652 NV_ERROR(dev, "Bad Display Configuration Block "
5653 "signature (%s)\n", sig);
5654 return -EINVAL;
5655 }
5656 } else {
5657 /*
5658 * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
5659 * has the same single (crt) entry, even when tv-out present, so
5660 * the conclusion is this version cannot really be used.
5661 * v1.2 tables (some NV6/10, and NV15+) normally have the same
5662	 * 5 entries, which are not specific to the card and so are of no use.
5663 * v1.2 does have an I2C table that read_dcb_i2c_table can
5664 * handle, but cards exist (nv11 in #14821) with a bad i2c table
5665 * pointer, so use the indices parsed in parse_bmp_structure.
5666 * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
5667 */
5668 NV_TRACEWARN(dev, "No useful information in BIOS output table; "
5669 "adding all possible outputs\n");
5670 fabricate_vga_output(dcb, LEGACY_I2C_CRT, 1);
5671
5672 /*
5673 * Attempt to detect TV before DVI because the test
5674 * for the former is more accurate and it rules the
5675 * latter out.
5676 */
5677 if (nv04_tv_identify(dev,
5678 bios->legacy.i2c_indices.tv) >= 0)
5679 fabricate_tv_output(dcb, twoHeads);
5680
5681 else if (bios->tmds.output0_script_ptr ||
5682 bios->tmds.output1_script_ptr)
5683 fabricate_dvi_i_output(dcb, twoHeads);
5684
5685 return 0;
5686 }
5687
5688 if (!i2ctabptr)
5689 NV_WARN(dev, "No pointer to DCB I2C port table\n");
5690 else {
5691 bdcb->i2c_table = &bios->data[i2ctabptr];
5692 if (bdcb->version >= 0x30)
5693 bdcb->i2c_default_indices = bdcb->i2c_table[4];
5694 }
5695
5696 parse_dcb_gpio_table(bios);
5697 parse_dcb_connector_table(bios);
5698
5699 if (entries > DCB_MAX_NUM_ENTRIES)
5700 entries = DCB_MAX_NUM_ENTRIES;
5701
5702 for (i = 0; i < entries; i++) {
5703 uint32_t connection, config = 0;
5704
5705 connection = ROM32(dcbtable[headerlen + recordlength * i]);
5706 if (configblock)
5707 config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
5708
5709 /* seen on an NV11 with DCB v1.5 */
5710 if (connection == 0x00000000)
5711 break;
5712
5713 /* seen on an NV17 with DCB v2.0 */
5714 if (connection == 0xffffffff)
5715 break;
5716
5717 if ((connection & 0x0000000f) == 0x0000000f)
5718 continue;
5719
5720 NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
5721 dcb->entries, connection, config);
5722
5723 if (!parse_dcb_entry(dev, bdcb, connection, config))
5724 break;
5725 }
5726
5727 /*
5728	 * Apart from v2.1+ not being known to require merging, this
5729	 * guarantees dcbent->index is the index of the entry in the ROM image
5730 */
5731 if (bdcb->version < 0x21)
5732 merge_like_dcb_entries(dev, dcb);
5733
5734 return dcb->entries ? 0 : -ENXIO;
5735}
5736
5737static void
5738fixup_legacy_connector(struct nvbios *bios)
5739{
5740 struct bios_parsed_dcb *bdcb = &bios->bdcb;
5741 struct parsed_dcb *dcb = &bdcb->dcb;
5742 int high = 0, i;
5743
5744 /*
5745 * DCB 3.0 also has the table in most cases, but there are some cards
5746	 * where the table is filled with stub entries, and the DCB entry
5747 * indices are all 0. We don't need the connector indices on pre-G80
5748 * chips (yet?) so limit the use to DCB 4.0 and above.
5749 */
5750 if (bdcb->version >= 0x40)
5751 return;
5752
5753 /*
5754	 * No known connector info before v3.0, so make it up. The rule here
5755	 * is: anything on the same i2c bus is considered to be on the same
5756	 * connector. Any output without an associated i2c bus is assigned
5757 * its own unique connector index.
5758 */
5759 for (i = 0; i < dcb->entries; i++) {
5760 if (dcb->entry[i].i2c_index == 0xf)
5761 continue;
5762
5763 /*
5764 * Ignore the I2C index for on-chip TV-out, as there
5765 * are cards with bogus values (nv31m in bug 23212),
5766 * and it's otherwise useless.
5767 */
5768 if (dcb->entry[i].type == OUTPUT_TV &&
5769 dcb->entry[i].location == DCB_LOC_ON_CHIP) {
5770 dcb->entry[i].i2c_index = 0xf;
5771 continue;
5772 }
5773
5774 dcb->entry[i].connector = dcb->entry[i].i2c_index;
5775 if (dcb->entry[i].connector > high)
5776 high = dcb->entry[i].connector;
5777 }
5778
5779 for (i = 0; i < dcb->entries; i++) {
5780 if (dcb->entry[i].i2c_index != 0xf)
5781 continue;
5782
5783 dcb->entry[i].connector = ++high;
5784 }
5785}
5786
5787static void
5788fixup_legacy_i2c(struct nvbios *bios)
5789{
5790 struct parsed_dcb *dcb = &bios->bdcb.dcb;
5791 int i;
5792
5793 for (i = 0; i < dcb->entries; i++) {
5794 if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
5795 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
5796 if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
5797 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
5798 if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
5799 dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
5800 }
5801}
5802
5803static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
5804{
5805 /*
5806 * The header following the "HWSQ" signature has the number of entries,
5807 * and the entry size
5808 *
5809 * An entry consists of a dword to write to the sequencer control reg
5810 * (0x00001304), followed by the ucode bytes, written sequentially,
5811 * starting at reg 0x00001400
5812 */
5813
5814 uint8_t bytes_to_write;
5815 uint16_t hwsq_entry_offset;
5816 int i;
5817
5818 if (bios->data[hwsq_offset] <= entry) {
5819 NV_ERROR(dev, "Too few entries in HW sequencer table for "
5820 "requested entry\n");
5821 return -ENOENT;
5822 }
5823
5824 bytes_to_write = bios->data[hwsq_offset + 1];
5825
5826 if (bytes_to_write != 36) {
5827 NV_ERROR(dev, "Unknown HW sequencer entry size\n");
5828 return -EINVAL;
5829 }
5830
5831 NV_TRACE(dev, "Loading NV17 power sequencing microcode\n");
5832
5833 hwsq_entry_offset = hwsq_offset + 2 + entry * bytes_to_write;
5834
5835 /* set sequencer control */
5836 bios_wr32(bios, 0x00001304, ROM32(bios->data[hwsq_entry_offset]));
5837 bytes_to_write -= 4;
5838
5839 /* write ucode */
5840 for (i = 0; i < bytes_to_write; i += 4)
5841 bios_wr32(bios, 0x00001400 + i, ROM32(bios->data[hwsq_entry_offset + i + 4]));
5842
5843 /* twiddle NV_PBUS_DEBUG_4 */
5844 bios_wr32(bios, NV_PBUS_DEBUG_4, bios_rd32(bios, NV_PBUS_DEBUG_4) | 0x18);
5845
5846 return 0;
5847}
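
/*
 * Entry layout handled above, for reference (the header following the
 * "HWSQ" signature is two bytes: entry count, then entry size of 36):
 *
 *   +0x00  dword   -> sequencer control reg 0x00001304
 *   +0x04  32 bytes of ucode -> 0x00001400, 0x00001404, ... 0x0000141c
 */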
5848
5849static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
5850 struct nvbios *bios)
5851{
5852 /*
5853	 * BMP-based cards, from NV17 on, need microcode loaded to correctly
5854	 * control the GPIOs etc. for LVDS panels
5855 *
5856 * BIT based cards seem to do this directly in the init scripts
5857 *
5858 * The microcode entries are found by the "HWSQ" signature.
5859 */
5860
5861 const uint8_t hwsq_signature[] = { 'H', 'W', 'S', 'Q' };
5862 const int sz = sizeof(hwsq_signature);
5863 int hwsq_offset;
5864
5865 hwsq_offset = findstr(bios->data, bios->length, hwsq_signature, sz);
5866 if (!hwsq_offset)
5867 return 0;
5868
5869 /* always use entry 0? */
5870 return load_nv17_hwsq_ucode_entry(dev, bios, hwsq_offset + sz, 0);
5871}
5872
5873uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
5874{
5875 struct drm_nouveau_private *dev_priv = dev->dev_private;
5876 struct nvbios *bios = &dev_priv->VBIOS;
5877 const uint8_t edid_sig[] = {
5878 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
5879 uint16_t offset = 0;
5880 uint16_t newoffset;
5881 int searchlen = NV_PROM_SIZE;
5882
5883 if (bios->fp.edid)
5884 return bios->fp.edid;
5885
5886 while (searchlen) {
5887 newoffset = findstr(&bios->data[offset], searchlen,
5888 edid_sig, 8);
5889 if (!newoffset)
5890 return NULL;
5891 offset += newoffset;
5892 if (!nv_cksum(&bios->data[offset], EDID1_LEN))
5893 break;
5894
5895 searchlen -= offset;
5896 offset++;
5897 }
5898
5899 NV_TRACE(dev, "Found EDID in BIOS\n");
5900
5901 return bios->fp.edid = &bios->data[offset];
5902}
5903
5904void
5905nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
5906 struct dcb_entry *dcbent)
5907{
5908 struct drm_nouveau_private *dev_priv = dev->dev_private;
5909 struct nvbios *bios = &dev_priv->VBIOS;
5910 struct init_exec iexec = { true, false };
5911
5912 bios->display.output = dcbent;
5913 parse_init_table(bios, table, &iexec);
5914 bios->display.output = NULL;
5915}
5916
5917static bool NVInitVBIOS(struct drm_device *dev)
5918{
5919 struct drm_nouveau_private *dev_priv = dev->dev_private;
5920 struct nvbios *bios = &dev_priv->VBIOS;
5921
5922 memset(bios, 0, sizeof(struct nvbios));
5923 bios->dev = dev;
5924
5925 if (!NVShadowVBIOS(dev, bios->data))
5926 return false;
5927
5928 bios->length = NV_PROM_SIZE;
5929 return true;
5930}
5931
5932static int nouveau_parse_vbios_struct(struct drm_device *dev)
5933{
5934 struct drm_nouveau_private *dev_priv = dev->dev_private;
5935 struct nvbios *bios = &dev_priv->VBIOS;
5936 const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
5937 const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
5938 int offset;
5939
5940 offset = findstr(bios->data, bios->length,
5941 bit_signature, sizeof(bit_signature));
5942 if (offset) {
5943 NV_TRACE(dev, "BIT BIOS found\n");
5944 return parse_bit_structure(bios, offset + 6);
5945 }
5946
5947 offset = findstr(bios->data, bios->length,
5948 bmp_signature, sizeof(bmp_signature));
5949 if (offset) {
5950 NV_TRACE(dev, "BMP BIOS found\n");
5951 return parse_bmp_structure(dev, bios, offset);
5952 }
5953
5954 NV_ERROR(dev, "No known BIOS signature found\n");
5955 return -ENODEV;
5956}
5957
5958int
5959nouveau_run_vbios_init(struct drm_device *dev)
5960{
5961 struct drm_nouveau_private *dev_priv = dev->dev_private;
5962 struct nvbios *bios = &dev_priv->VBIOS;
5963 int i, ret = 0;
5964
5965 NVLockVgaCrtcs(dev, false);
5966 if (nv_two_heads(dev))
5967 NVSetOwner(dev, bios->state.crtchead);
5968
5969 if (bios->major_version < 5) /* BMP only */
5970 load_nv17_hw_sequencer_ucode(dev, bios);
5971
5972 if (bios->execute) {
5973 bios->fp.last_script_invoc = 0;
5974 bios->fp.lvds_init_run = false;
5975 }
5976
5977 parse_init_tables(bios);
5978
5979 /*
5980 * Runs an additional script seen on G8x VBIOSen. The VBIOS's own
5981 * parser runs this right after the init tables; the binary
5982 * driver appears to run it at some later point.
5983 */
5984 if (bios->some_script_ptr) {
5985 struct init_exec iexec = {true, false};
5986
5987 NV_INFO(dev, "Parsing VBIOS init table at offset 0x%04X\n",
5988 bios->some_script_ptr);
5989 parse_init_table(bios, bios->some_script_ptr, &iexec);
5990 }
5991
5992 if (dev_priv->card_type >= NV_50) {
5993 for (i = 0; i < bios->bdcb.dcb.entries; i++) {
5994 nouveau_bios_run_display_table(dev,
5995 &bios->bdcb.dcb.entry[i],
5996 0, 0);
5997 }
5998 }
5999
6000 NVLockVgaCrtcs(dev, true);
6001
6002 return ret;
6003}
6004
6005static void
6006nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
6007{
6008 struct drm_nouveau_private *dev_priv = dev->dev_private;
6009 struct nvbios *bios = &dev_priv->VBIOS;
6010 struct dcb_i2c_entry *entry;
6011 int i;
6012
6013 entry = &bios->bdcb.dcb.i2c[0];
6014 for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
6015 nouveau_i2c_fini(dev, entry);
6016}
6017
6018int
6019nouveau_bios_init(struct drm_device *dev)
6020{
6021 struct drm_nouveau_private *dev_priv = dev->dev_private;
6022 struct nvbios *bios = &dev_priv->VBIOS;
6023 uint32_t saved_nv_pextdev_boot_0;
6024 bool was_locked;
6025 int ret;
6026
6027 dev_priv->vbios = &bios->pub;
6028
6029 if (!NVInitVBIOS(dev))
6030 return -ENODEV;
6031
6032 ret = nouveau_parse_vbios_struct(dev);
6033 if (ret)
6034 return ret;
6035
6036 ret = parse_dcb_table(dev, bios, nv_two_heads(dev));
6037 if (ret)
6038 return ret;
6039
6040 fixup_legacy_i2c(bios);
6041 fixup_legacy_connector(bios);
6042
6043 if (!bios->major_version) /* we don't run version 0 bios */
6044 return 0;
6045
6046 /* these will need remembering across a suspend */
6047 saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
6048 bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
6049
6050 /* init script execution disabled */
6051 bios->execute = false;
6052
6053 /* ... unless card isn't POSTed already */
6054 if (dev_priv->card_type >= NV_10 &&
6055 NVReadVgaCrtc(dev, 0, 0x00) == 0 &&
6056 NVReadVgaCrtc(dev, 0, 0x1a) == 0) {
6057 NV_INFO(dev, "Adaptor not initialised\n");
6058 if (dev_priv->card_type < NV_50) {
6059 NV_ERROR(dev, "Unable to POST this chipset\n");
6060 return -ENODEV;
6061 }
6062
6063 NV_INFO(dev, "Running VBIOS init tables\n");
6064 bios->execute = true;
6065 }
6066
6067 bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
6068
6069 ret = nouveau_run_vbios_init(dev);
6070 if (ret) {
6071 dev_priv->vbios = NULL;
6072 return ret;
6073 }
6074
6075 /* feature_byte on BMP is poor, but init always sets CR4B */
6076 was_locked = NVLockVgaCrtcs(dev, false);
6077 if (bios->major_version < 5)
6078 bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
6079
6080 /* all BIT systems need p_f_m_t for digital_min_front_porch */
6081 if (bios->is_mobile || bios->major_version >= 5)
6082 ret = parse_fp_mode_table(dev, bios);
6083 NVLockVgaCrtcs(dev, was_locked);
6084
6085 /* allow subsequent scripts to execute */
6086 bios->execute = true;
6087
6088 return 0;
6089}
6090
6091void
6092nouveau_bios_takedown(struct drm_device *dev)
6093{
6094 nouveau_bios_i2c_devices_takedown(dev);
6095}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
new file mode 100644
index 000000000000..1d5f10bd78ed
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -0,0 +1,289 @@
1/*
2 * Copyright 2007-2008 Nouveau Project
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#ifndef __NOUVEAU_BIOS_H__
25#define __NOUVEAU_BIOS_H__
26
27#include "nvreg.h"
28#include "nouveau_i2c.h"
29
30#define DCB_MAX_NUM_ENTRIES 16
31#define DCB_MAX_NUM_I2C_ENTRIES 16
32#define DCB_MAX_NUM_GPIO_ENTRIES 32
33#define DCB_MAX_NUM_CONNECTOR_ENTRIES 16
34
35#define DCB_LOC_ON_CHIP 0
36
37struct dcb_entry {
38 int index; /* may not be raw dcb index if merging has happened */
39 uint8_t type;
40 uint8_t i2c_index;
41 uint8_t heads;
42 uint8_t connector;
43 uint8_t bus;
44 uint8_t location;
45 uint8_t or;
46 bool duallink_possible;
47 union {
48 struct sor_conf {
49 int link;
50 } sorconf;
51 struct {
52 int maxfreq;
53 } crtconf;
54 struct {
55 struct sor_conf sor;
56 bool use_straps_for_mode;
57 bool use_power_scripts;
58 } lvdsconf;
59 struct {
60 bool has_component_output;
61 } tvconf;
62 struct {
63 struct sor_conf sor;
64 int link_nr;
65 int link_bw;
66 } dpconf;
67 struct {
68 struct sor_conf sor;
69 } tmdsconf;
70 };
71 bool i2c_upper_default;
72};
73
74struct dcb_i2c_entry {
75 uint8_t port_type;
76 uint8_t read, write;
77 struct nouveau_i2c_chan *chan;
78};
79
80struct parsed_dcb {
81 int entries;
82 struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
83 struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
84};
85
86enum dcb_gpio_tag {
87 DCB_GPIO_TVDAC0 = 0xc,
88 DCB_GPIO_TVDAC1 = 0x2d,
89};
90
91struct dcb_gpio_entry {
92 enum dcb_gpio_tag tag;
93 int line;
94 bool invert;
95};
96
97struct parsed_dcb_gpio {
98 int entries;
99 struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
100};
101
102struct dcb_connector_table_entry {
103 uint32_t entry;
104 uint8_t type;
105 uint8_t index;
106 uint8_t gpio_tag;
107};
108
109struct dcb_connector_table {
110 int entries;
111 struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
112};
113
114struct bios_parsed_dcb {
115 uint8_t version;
116
117 struct parsed_dcb dcb;
118
119 uint8_t *i2c_table;
120 uint8_t i2c_default_indices;
121
122 uint16_t gpio_table_ptr;
123 struct parsed_dcb_gpio gpio;
124 uint16_t connector_table_ptr;
125 struct dcb_connector_table connector;
126};
127
128enum nouveau_encoder_type {
129 OUTPUT_ANALOG = 0,
130 OUTPUT_TV = 1,
131 OUTPUT_TMDS = 2,
132 OUTPUT_LVDS = 3,
133 OUTPUT_DP = 6,
134 OUTPUT_ANY = -1
135};
136
137enum nouveau_or {
138 OUTPUT_A = (1 << 0),
139 OUTPUT_B = (1 << 1),
140 OUTPUT_C = (1 << 2)
141};
142
143enum LVDS_script {
144 /* Order *does* matter here */
145 LVDS_INIT = 1,
146 LVDS_RESET,
147 LVDS_BACKLIGHT_ON,
148 LVDS_BACKLIGHT_OFF,
149 LVDS_PANEL_ON,
150 LVDS_PANEL_OFF
151};
152
153/* changing these requires matching changes to reg tables in nv_get_clock */
154#define MAX_PLL_TYPES 4
155enum pll_types {
156 NVPLL,
157 MPLL,
158 VPLL1,
159 VPLL2
160};
161
162struct pll_lims {
163 struct {
164 int minfreq;
165 int maxfreq;
166 int min_inputfreq;
167 int max_inputfreq;
168
169 uint8_t min_m;
170 uint8_t max_m;
171 uint8_t min_n;
172 uint8_t max_n;
173 } vco1, vco2;
174
175 uint8_t max_log2p;
176 /*
177 * for most pre nv50 cards setting a log2P of 7 (the common max_log2p
178 * value) is no different to 6 (at least for vplls) so allowing the MNP
179 * calc to use 7 causes the generated clock to be out by a factor of 2.
180 * However, max_log2p cannot be fixed up during parsing, as the
181 * unmodified max_log2p value is still needed for setting mplls, hence
182 * an additional max_usable_log2p member
183 */
184 uint8_t max_usable_log2p;
185 uint8_t log2p_bias;
186
187 uint8_t min_p;
188 uint8_t max_p;
189
190 int refclk;
191};
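/*
 * Editor's worked example for max_usable_log2p (assumed figures): if
 * hardware treats log2P = 7 the same as 6, a VPLL programmed with
 * P = 2^7 = 128 really divides by 64, so the output clock lands at
 * twice the requested frequency.  Clamping the MNP search to
 * max_usable_log2p = 6, while leaving max_log2p intact for MPLL setup,
 * avoids that, as the comment above describes.
 */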
192
193struct nouveau_bios_info {
194 struct parsed_dcb *dcb;
195
196 uint8_t chip_version;
197
198 uint32_t dactestval;
199 uint32_t tvdactestval;
200 uint8_t digital_min_front_porch;
201 bool fp_no_ddc;
202};
203
204struct nvbios {
205 struct drm_device *dev;
206 struct nouveau_bios_info pub;
207
208 uint8_t data[NV_PROM_SIZE];
209 unsigned int length;
210 bool execute;
211
212 uint8_t major_version;
213 uint8_t feature_byte;
214 bool is_mobile;
215
216 uint32_t fmaxvco, fminvco;
217
218 bool old_style_init;
219 uint16_t init_script_tbls_ptr;
220 uint16_t extra_init_script_tbl_ptr;
221 uint16_t macro_index_tbl_ptr;
222 uint16_t macro_tbl_ptr;
223 uint16_t condition_tbl_ptr;
224 uint16_t io_condition_tbl_ptr;
225 uint16_t io_flag_condition_tbl_ptr;
226 uint16_t init_function_tbl_ptr;
227
228 uint16_t pll_limit_tbl_ptr;
229 uint16_t ram_restrict_tbl_ptr;
230
231 uint16_t some_script_ptr; /* BIT I + 14 */
232 uint16_t init96_tbl_ptr; /* BIT I + 16 */
233
234 struct bios_parsed_dcb bdcb;
235
236 struct {
237 int crtchead;
238 /* these need remembering across suspend */
239 uint32_t saved_nv_pfb_cfg0;
240 } state;
241
242 struct {
243 struct dcb_entry *output;
244 uint16_t script_table_ptr;
245 uint16_t dp_table_ptr;
246 } display;
247
248 struct {
249 uint16_t fptablepointer; /* also used by tmds */
250 uint16_t fpxlatetableptr;
251 int xlatwidth;
252 uint16_t lvdsmanufacturerpointer;
253 uint16_t fpxlatemanufacturertableptr;
254 uint16_t mode_ptr;
255 uint16_t xlated_entry;
256 bool power_off_for_reset;
257 bool reset_after_pclk_change;
258 bool dual_link;
259 bool link_c_increment;
260 bool BITbit1;
261 bool if_is_24bit;
262 int duallink_transition_clk;
263 uint8_t strapless_is_24bit;
264 uint8_t *edid;
265
266 /* will need resetting after suspend */
267 int last_script_invoc;
268 bool lvds_init_run;
269 } fp;
270
271 struct {
272 uint16_t output0_script_ptr;
273 uint16_t output1_script_ptr;
274 } tmds;
275
276 struct {
277 uint16_t mem_init_tbl_ptr;
278 uint16_t sdr_seq_tbl_ptr;
279 uint16_t ddr_seq_tbl_ptr;
280
281 struct {
282 uint8_t crt, tv, panel;
283 } i2c_indices;
284
285 uint16_t lvds_single_a_script_ptr;
286 } legacy;
287};
288
289#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 000000000000..320a14bceb99
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,671 @@
1/*
2 * Copyright 2007 Dave Airlied
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24/*
25 * Authors: Dave Airlied <airlied@linux.ie>
26 * Ben Skeggs <darktama@iinet.net.au>
27 * Jeremy Kolb <jkolb@brandeis.edu>
28 */
29
30#include "drmP.h"
31
32#include "nouveau_drm.h"
33#include "nouveau_drv.h"
34#include "nouveau_dma.h"
35
36static void
37nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
38{
39 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
40 struct nouveau_bo *nvbo = nouveau_bo(bo);
41
42 ttm_bo_kunmap(&nvbo->kmap);
43
44 if (unlikely(nvbo->gem))
45 DRM_ERROR("bo %p still attached to GEM object\n", bo);
46
47 spin_lock(&dev_priv->ttm.bo_list_lock);
48 list_del(&nvbo->head);
49 spin_unlock(&dev_priv->ttm.bo_list_lock);
50 kfree(nvbo);
51}
52
53int
54nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
55 int size, int align, uint32_t flags, uint32_t tile_mode,
56 uint32_t tile_flags, bool no_vm, bool mappable,
57 struct nouveau_bo **pnvbo)
58{
59 struct drm_nouveau_private *dev_priv = dev->dev_private;
60 struct nouveau_bo *nvbo;
61 int ret, n = 0;
62
63 nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
64 if (!nvbo)
65 return -ENOMEM;
66 INIT_LIST_HEAD(&nvbo->head);
67 INIT_LIST_HEAD(&nvbo->entry);
68 nvbo->mappable = mappable;
69 nvbo->no_vm = no_vm;
70 nvbo->tile_mode = tile_mode;
71 nvbo->tile_flags = tile_flags;
72
73 /*
74 * Some of the tile_flags have a periodic structure of N*4096 bytes,
75 * align to that as well as to the page size. Overallocate memory to
76 * avoid corruption of other buffer objects.
77 */
78 switch (tile_flags) {
79 case 0x1800:
80 case 0x2800:
81 case 0x4800:
82 case 0x7a00:
83 if (dev_priv->chipset >= 0xA0) {
84 /* This is based on high-end cards with a 448-bit
85 * memory bus; could be different elsewhere. */
86 size += 6 * 28672;
87 /* 8 * 28672 is the actual alignment requirement,
88 * but we must also align to page size. */
89 align = 2 * 8 * 28672;
90 } else if (dev_priv->chipset >= 0x90) {
91 size += 3 * 16384;
92 align = 12 * 16384;
93 } else {
94 size += 3 * 8192;
95 /* 12 * 8192 is the actual alignment requirement,
96 * but we must also align to page size. */
97 align = 2 * 12 * 8192;
98 }
99 break;
100 default:
101 break;
102 }
103
104 align >>= PAGE_SHIFT;
105
106 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
107 if (dev_priv->card_type == NV_50) {
108 size = (size + 65535) & ~65535;
109 if (align < (65536 / PAGE_SIZE))
110 align = (65536 / PAGE_SIZE);
111 }
112
113 if (flags & TTM_PL_FLAG_VRAM)
114 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
115 if (flags & TTM_PL_FLAG_TT)
116 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
117 nvbo->placement.fpfn = 0;
118 nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
119 nvbo->placement.placement = nvbo->placements;
120 nvbo->placement.busy_placement = nvbo->placements;
121 nvbo->placement.num_placement = n;
122 nvbo->placement.num_busy_placement = n;
123
124 nvbo->channel = chan;
125 nouveau_bo_placement_set(nvbo, flags);
126 ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
127 ttm_bo_type_device, &nvbo->placement, align, 0,
128 false, NULL, size, nouveau_bo_del_ttm);
129 nvbo->channel = NULL;
130 if (ret) {
131 /* ttm will call nouveau_bo_del_ttm if it fails.. */
132 return ret;
133 }
134
135 spin_lock(&dev_priv->ttm.bo_list_lock);
136 list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
137 spin_unlock(&dev_priv->ttm.bo_list_lock);
138 *pnvbo = nvbo;
139 return 0;
140}
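/*
 * Editor's sketch of the rounding idiom used above: for a power-of-two
 * a, "(x + (a - 1)) & ~(a - 1)" rounds x up to the next multiple of a.
 * With PAGE_SIZE = 4096 (an assumption; it varies by architecture),
 * size = 5000 becomes (5000 + 4095) & ~4095 = 8192, and the NV50
 * branch applies the same trick with a 65536-byte granule.
 */
static inline unsigned long round_up_pow2(unsigned long x, unsigned long a)
{
	return (x + (a - 1)) & ~(a - 1);	/* a must be a power of two */
}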
141
142void
143nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
144{
145 int n = 0;
146
147 if (memtype & TTM_PL_FLAG_VRAM)
148 nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
149 if (memtype & TTM_PL_FLAG_TT)
150 nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
151 if (memtype & TTM_PL_FLAG_SYSTEM)
152 nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
153 nvbo->placement.placement = nvbo->placements;
154 nvbo->placement.busy_placement = nvbo->placements;
155 nvbo->placement.num_placement = n;
156 nvbo->placement.num_busy_placement = n;
157}
158
159int
160nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
161{
162 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
163 struct ttm_buffer_object *bo = &nvbo->bo;
164 int ret, i;
165
166 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
167 NV_ERROR(nouveau_bdev(bo->bdev)->dev,
168 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
169 1 << bo->mem.mem_type, memtype);
170 return -EINVAL;
171 }
172
173 if (nvbo->pin_refcnt++)
174 return 0;
175
176 ret = ttm_bo_reserve(bo, false, false, false, 0);
177 if (ret)
178 goto out;
179
180 nouveau_bo_placement_set(nvbo, memtype);
181 for (i = 0; i < nvbo->placement.num_placement; i++)
182 nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
183
184 ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
185 if (ret == 0) {
186 switch (bo->mem.mem_type) {
187 case TTM_PL_VRAM:
188 dev_priv->fb_aper_free -= bo->mem.size;
189 break;
190 case TTM_PL_TT:
191 dev_priv->gart_info.aper_free -= bo->mem.size;
192 break;
193 default:
194 break;
195 }
196 }
197 ttm_bo_unreserve(bo);
198out:
199 if (unlikely(ret))
200 nvbo->pin_refcnt--;
201 return ret;
202}
203
204int
205nouveau_bo_unpin(struct nouveau_bo *nvbo)
206{
207 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
208 struct ttm_buffer_object *bo = &nvbo->bo;
209 int ret, i;
210
211 if (--nvbo->pin_refcnt)
212 return 0;
213
214 ret = ttm_bo_reserve(bo, false, false, false, 0);
215 if (ret)
216 return ret;
217
218 for (i = 0; i < nvbo->placement.num_placement; i++)
219 nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
220
221 ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
222 if (ret == 0) {
223 switch (bo->mem.mem_type) {
224 case TTM_PL_VRAM:
225 dev_priv->fb_aper_free += bo->mem.size;
226 break;
227 case TTM_PL_TT:
228 dev_priv->gart_info.aper_free += bo->mem.size;
229 break;
230 default:
231 break;
232 }
233 }
234
235 ttm_bo_unreserve(bo);
236 return ret;
237}
238
239int
240nouveau_bo_map(struct nouveau_bo *nvbo)
241{
242 int ret;
243
244 ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
245 if (ret)
246 return ret;
247
248 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
249 ttm_bo_unreserve(&nvbo->bo);
250 return ret;
251}
252
253void
254nouveau_bo_unmap(struct nouveau_bo *nvbo)
255{
256 ttm_bo_kunmap(&nvbo->kmap);
257}
258
259u16
260nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
261{
262 bool is_iomem;
263 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
264 mem = &mem[index];
265 if (is_iomem)
266 return ioread16_native((void __force __iomem *)mem);
267 else
268 return *mem;
269}
270
271void
272nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
273{
274 bool is_iomem;
275 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
276 mem = &mem[index];
277 if (is_iomem)
278 iowrite16_native(val, (void __force __iomem *)mem);
279 else
280 *mem = val;
281}
282
283u32
284nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
285{
286 bool is_iomem;
287 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
288 mem = &mem[index];
289 if (is_iomem)
290 return ioread32_native((void __force __iomem *)mem);
291 else
292 return *mem;
293}
294
295void
296nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
297{
298 bool is_iomem;
299 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
300 mem = &mem[index];
301 if (is_iomem)
302 iowrite32_native(val, (void __force __iomem *)mem);
303 else
304 *mem = val;
305}
306
307static struct ttm_backend *
308nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
309{
310 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
311 struct drm_device *dev = dev_priv->dev;
312
313 switch (dev_priv->gart_info.type) {
314 case NOUVEAU_GART_AGP:
315 return ttm_agp_backend_init(bdev, dev->agp->bridge);
316 case NOUVEAU_GART_SGDMA:
317 return nouveau_sgdma_init_ttm(dev);
318 default:
319 NV_ERROR(dev, "Unknown GART type %d\n",
320 dev_priv->gart_info.type);
321 break;
322 }
323
324 return NULL;
325}
326
327static int
328nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
329{
330 /* We'll do this from user space. */
331 return 0;
332}
333
334static int
335nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
336 struct ttm_mem_type_manager *man)
337{
338 struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
339 struct drm_device *dev = dev_priv->dev;
340
341 switch (type) {
342 case TTM_PL_SYSTEM:
343 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
344 man->available_caching = TTM_PL_MASK_CACHING;
345 man->default_caching = TTM_PL_FLAG_CACHED;
346 break;
347 case TTM_PL_VRAM:
348 man->flags = TTM_MEMTYPE_FLAG_FIXED |
349 TTM_MEMTYPE_FLAG_MAPPABLE |
350 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
351 man->available_caching = TTM_PL_FLAG_UNCACHED |
352 TTM_PL_FLAG_WC;
353 man->default_caching = TTM_PL_FLAG_WC;
354
355 man->io_addr = NULL;
356 man->io_offset = drm_get_resource_start(dev, 1);
357 man->io_size = drm_get_resource_len(dev, 1);
358 if (man->io_size > nouveau_mem_fb_amount(dev))
359 man->io_size = nouveau_mem_fb_amount(dev);
360
361 man->gpu_offset = dev_priv->vm_vram_base;
362 break;
363 case TTM_PL_TT:
364 switch (dev_priv->gart_info.type) {
365 case NOUVEAU_GART_AGP:
366 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
367 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
368 man->available_caching = TTM_PL_FLAG_UNCACHED;
369 man->default_caching = TTM_PL_FLAG_UNCACHED;
370 break;
371 case NOUVEAU_GART_SGDMA:
372 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
373 TTM_MEMTYPE_FLAG_CMA;
374 man->available_caching = TTM_PL_MASK_CACHING;
375 man->default_caching = TTM_PL_FLAG_CACHED;
376 break;
377 default:
378 NV_ERROR(dev, "Unknown GART type: %d\n",
379 dev_priv->gart_info.type);
380 return -EINVAL;
381 }
382
383 man->io_offset = dev_priv->gart_info.aper_base;
384 man->io_size = dev_priv->gart_info.aper_size;
385 man->io_addr = NULL;
386 man->gpu_offset = dev_priv->vm_gart_base;
387 break;
388 default:
389 NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
390 return -EINVAL;
391 }
392 return 0;
393}
394
395static void
396nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
397{
398 struct nouveau_bo *nvbo = nouveau_bo(bo);
399
400 switch (bo->mem.mem_type) {
401 default:
402 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
403 break;
404 }
405}
406
407
408/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
409 * TTM_PL_{VRAM,TT} directly.
410 */
411static int
412nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
413 struct nouveau_bo *nvbo, bool evict, bool no_wait,
414 struct ttm_mem_reg *new_mem)
415{
416 struct nouveau_fence *fence = NULL;
417 int ret;
418
419 ret = nouveau_fence_new(chan, &fence, true);
420 if (ret)
421 return ret;
422
423 ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
424 evict, no_wait, new_mem);
425 nouveau_fence_unref((void *)&fence);
426 return ret;
427}
428
429static inline uint32_t
430nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
431 struct ttm_mem_reg *mem)
432{
433 if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
434 if (mem->mem_type == TTM_PL_TT)
435 return NvDmaGART;
436 return NvDmaVRAM;
437 }
438
439 if (mem->mem_type == TTM_PL_TT)
440 return chan->gart_handle;
441 return chan->vram_handle;
442}
443
444static int
445nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
446 struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
447{
448 struct nouveau_bo *nvbo = nouveau_bo(bo);
449 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
450 struct nouveau_channel *chan;
451 uint64_t src_offset, dst_offset;
452 uint32_t page_count;
453 int ret;
454
455 chan = nvbo->channel;
456 if (!chan || nvbo->tile_flags || nvbo->no_vm) {
457 chan = dev_priv->channel;
458 if (!chan)
459 return -EINVAL;
460 }
461
462 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
463 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
464 if (chan != dev_priv->channel) {
465 if (old_mem->mem_type == TTM_PL_TT)
466 src_offset += dev_priv->vm_gart_base;
467 else
468 src_offset += dev_priv->vm_vram_base;
469
470 if (new_mem->mem_type == TTM_PL_TT)
471 dst_offset += dev_priv->vm_gart_base;
472 else
473 dst_offset += dev_priv->vm_vram_base;
474 }
475
476 ret = RING_SPACE(chan, 3);
477 if (ret)
478 return ret;
479 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
480 OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
481 OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
482
483 if (dev_priv->card_type >= NV_50) {
484 ret = RING_SPACE(chan, 4);
485 if (ret)
486 return ret;
487 BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
488 OUT_RING(chan, 1);
489 BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
490 OUT_RING(chan, 1);
491 }
492
493 page_count = new_mem->num_pages;
494 while (page_count) {
495 int line_count = (page_count > 2047) ? 2047 : page_count;
496
497 if (dev_priv->card_type >= NV_50) {
498 ret = RING_SPACE(chan, 3);
499 if (ret)
500 return ret;
501 BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
502 OUT_RING(chan, upper_32_bits(src_offset));
503 OUT_RING(chan, upper_32_bits(dst_offset));
504 }
505 ret = RING_SPACE(chan, 11);
506 if (ret)
507 return ret;
508 BEGIN_RING(chan, NvSubM2MF,
509 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
510 OUT_RING(chan, lower_32_bits(src_offset));
511 OUT_RING(chan, lower_32_bits(dst_offset));
512 OUT_RING(chan, PAGE_SIZE); /* src_pitch */
513 OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
514 OUT_RING(chan, PAGE_SIZE); /* line_length */
515 OUT_RING(chan, line_count);
516 OUT_RING(chan, (1<<8)|(1<<0));
517 OUT_RING(chan, 0);
518 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
519 OUT_RING(chan, 0);
520
521 page_count -= line_count;
522 src_offset += (PAGE_SIZE * line_count);
523 dst_offset += (PAGE_SIZE * line_count);
524 }
525
526 return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
527}
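/*
 * Editor's note on the loop above: each M2MF submission copies at most
 * 2047 lines of PAGE_SIZE bytes (one line per page), so a buffer of
 * page_count pages needs ceil(page_count / 2047) passes.  A
 * hypothetical helper making that arithmetic explicit:
 */
static inline uint32_t m2mf_pass_count(uint32_t page_count)
{
	return (page_count + 2046) / 2047;	/* ceiling division */
}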
528
529static int
530nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
531 bool no_wait, struct ttm_mem_reg *new_mem)
532{
533 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
534 struct ttm_placement placement;
535 struct ttm_mem_reg tmp_mem;
536 int ret;
537
538 placement.fpfn = placement.lpfn = 0;
539 placement.num_placement = placement.num_busy_placement = 1;
540 placement.placement = &placement_memtype;
541
542 tmp_mem = *new_mem;
543 tmp_mem.mm_node = NULL;
544 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
545 if (ret)
546 return ret;
547
548 ret = ttm_tt_bind(bo->ttm, &tmp_mem);
549 if (ret)
550 goto out;
551
552 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
553 if (ret)
554 goto out;
555
556 ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
557out:
558 if (tmp_mem.mm_node) {
559 spin_lock(&bo->bdev->glob->lru_lock);
560 drm_mm_put_block(tmp_mem.mm_node);
561 spin_unlock(&bo->bdev->glob->lru_lock);
562 }
563
564 return ret;
565}
566
567static int
568nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
569 bool no_wait, struct ttm_mem_reg *new_mem)
570{
571 u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
572 struct ttm_placement placement;
573 struct ttm_mem_reg tmp_mem;
574 int ret;
575
576 placement.fpfn = placement.lpfn = 0;
577 placement.num_placement = placement.num_busy_placement = 1;
578 placement.placement = &placement_memtype;
579
580 tmp_mem = *new_mem;
581 tmp_mem.mm_node = NULL;
582 ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
583 if (ret)
584 return ret;
585
586 ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
587 if (ret)
588 goto out;
589
590 ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
591 if (ret)
592 goto out;
593
594out:
595 if (tmp_mem.mm_node) {
596 spin_lock(&bo->bdev->glob->lru_lock);
597 drm_mm_put_block(tmp_mem.mm_node);
598 spin_unlock(&bo->bdev->glob->lru_lock);
599 }
600
601 return ret;
602}
603
604static int
605nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
606 bool no_wait, struct ttm_mem_reg *new_mem)
607{
608 struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
609 struct nouveau_bo *nvbo = nouveau_bo(bo);
610 struct drm_device *dev = dev_priv->dev;
611 struct ttm_mem_reg *old_mem = &bo->mem;
612 int ret;
613
614 if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
615 !nvbo->no_vm) {
616 uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
617
618 ret = nv50_mem_vm_bind_linear(dev,
619 offset + dev_priv->vm_vram_base,
620 new_mem->size, nvbo->tile_flags,
621 offset);
622 if (ret)
623 return ret;
624 }
625
626 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
627 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
628
629 if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
630 BUG_ON(bo->mem.mm_node != NULL);
631 bo->mem = *new_mem;
632 new_mem->mm_node = NULL;
633 return 0;
634 }
635
636 if (new_mem->mem_type == TTM_PL_SYSTEM) {
637 if (old_mem->mem_type == TTM_PL_SYSTEM)
638 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
639 if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
640 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
641 } else if (old_mem->mem_type == TTM_PL_SYSTEM) {
642 if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
643 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
644 } else {
645 if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
646 return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
647 }
648
649 return 0;
650}
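/*
 * Editor's note on the dispatch above: the GPU copy path cannot touch
 * TTM_PL_SYSTEM directly, so moves to or from system memory bounce
 * through a TT placement -- "flipd" for the eviction direction,
 * "flips" for the other -- and only the VRAM<->TT leg runs on the
 * M2MF engine.  Every branch falls back to ttm_bo_move_memcpy() if
 * the accelerated path fails or the card is not yet initialised.
 */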
651
652static int
653nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
654{
655 return 0;
656}
657
658struct ttm_bo_driver nouveau_bo_driver = {
659 .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
660 .invalidate_caches = nouveau_bo_invalidate_caches,
661 .init_mem_type = nouveau_bo_init_mem_type,
662 .evict_flags = nouveau_bo_evict_flags,
663 .move = nouveau_bo_move,
664 .verify_access = nouveau_bo_verify_access,
665 .sync_obj_signaled = nouveau_fence_signalled,
666 .sync_obj_wait = nouveau_fence_wait,
667 .sync_obj_flush = nouveau_fence_flush,
668 .sync_obj_unref = nouveau_fence_unref,
669 .sync_obj_ref = nouveau_fence_ref,
670};
671
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
new file mode 100644
index 000000000000..ee2b84504d05
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -0,0 +1,478 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2007-2009 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "drmP.h"
25#include "nouveau_drv.h"
26#include "nouveau_hw.h"
27
28/****************************************************************************\
29* *
30* The video arbitration routines calculate some "magic" numbers, which *
31* fix the snow seen when accessing the framebuffer without them. *
32* It just works (I hope). *
33* *
34\****************************************************************************/
35
36struct nv_fifo_info {
37 int lwm;
38 int burst;
39};
40
41struct nv_sim_state {
42 int pclk_khz;
43 int mclk_khz;
44 int nvclk_khz;
45 int bpp;
46 int mem_page_miss;
47 int mem_latency;
48 int memory_type;
49 int memory_width;
50 int two_heads;
51};
52
53static void
54nv04_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
55{
56 int pagemiss, cas, width, bpp;
57 int nvclks, mclks, pclks, crtpagemiss;
58 int found, mclk_extra, mclk_loop, cbs, m1, p1;
59 int mclk_freq, pclk_freq, nvclk_freq;
60 int us_m, us_n, us_p, crtc_drain_rate;
61 int cpm_us, us_crt, clwm;
62
63 pclk_freq = arb->pclk_khz;
64 mclk_freq = arb->mclk_khz;
65 nvclk_freq = arb->nvclk_khz;
66 pagemiss = arb->mem_page_miss;
67 cas = arb->mem_latency;
68 width = arb->memory_width >> 6;
69 bpp = arb->bpp;
70 cbs = 128;
71
72 pclks = 2;
73 nvclks = 10;
74 mclks = 13 + cas;
75 mclk_extra = 3;
76 found = 0;
77
78 while (!found) {
79 found = 1;
80
81 mclk_loop = mclks + mclk_extra;
82 us_m = mclk_loop * 1000 * 1000 / mclk_freq;
83 us_n = nvclks * 1000 * 1000 / nvclk_freq;
84 us_p = nvclks * 1000 * 1000 / pclk_freq;
85
86 crtc_drain_rate = pclk_freq * bpp / 8;
87 crtpagemiss = 2;
88 crtpagemiss += 1;
89 cpm_us = crtpagemiss * pagemiss * 1000 * 1000 / mclk_freq;
90 us_crt = cpm_us + us_m + us_n + us_p;
91 clwm = us_crt * crtc_drain_rate / (1000 * 1000);
92 clwm++;
93
94 m1 = clwm + cbs - 512;
95 p1 = m1 * pclk_freq / mclk_freq;
96 p1 = p1 * bpp / 8;
97 if ((p1 < m1 && m1 > 0) || clwm > 519) {
98 found = !mclk_extra;
99 mclk_extra--;
100 }
101 if (clwm < 384)
102 clwm = 384;
103
104 fifo->lwm = clwm;
105 fifo->burst = cbs;
106 }
107}
108
109static void
110nv10_calc_arb(struct nv_fifo_info *fifo, struct nv_sim_state *arb)
111{
112 int fill_rate, drain_rate;
113 int pclks, nvclks, mclks, xclks;
114 int pclk_freq, nvclk_freq, mclk_freq;
115 int fill_lat, extra_lat;
116 int max_burst_o, max_burst_l;
117 int fifo_len, min_lwm, max_lwm;
118 const int burst_lat = 80; /* Maximum allowable latency due
119 * to the CRTC FIFO burst. (ns) */
120
121 pclk_freq = arb->pclk_khz;
122 nvclk_freq = arb->nvclk_khz;
123 mclk_freq = arb->mclk_khz;
124
125 fill_rate = mclk_freq * arb->memory_width / 8; /* kB/s */
126 drain_rate = pclk_freq * arb->bpp / 8; /* kB/s */
127
128 fifo_len = arb->two_heads ? 1536 : 1024; /* B */
129
130 /* Fixed FIFO refill latency. */
131
132 pclks = 4; /* lwm detect. */
133
134 nvclks = 3 /* lwm -> sync. */
135 + 2 /* fbi bus cycles (1 req + 1 busy) */
136 + 1 /* 2 edge sync. may be very close to edge so
137 * just put one. */
138 + 1 /* fbi_d_rdv_n */
139 + 1 /* Fbi_d_rdata */
140 + 1; /* crtfifo load */
141
142 mclks = 1 /* 2 edge sync. may be very close to edge so
143 * just put one. */
144 + 1 /* arb_hp_req */
145 + 5 /* tiling pipeline */
146 + 2 /* latency fifo */
147 + 2 /* memory request to fbio block */
148 + 7; /* data returned from fbio block */
149
150 /* Need to accumulate 256 bits for read */
151 mclks += (arb->memory_type == 0 ? 2 : 1)
152 * arb->memory_width / 32;
153
154 fill_lat = mclks * 1000 * 1000 / mclk_freq /* minimum mclk latency */
155 + nvclks * 1000 * 1000 / nvclk_freq /* nvclk latency */
156 + pclks * 1000 * 1000 / pclk_freq; /* pclk latency */
157
158 /* Conditional FIFO refill latency. */
159
160 xclks = 2 * arb->mem_page_miss + mclks /* Extra latency due to
161 * the overlay. */
162 + 2 * arb->mem_page_miss /* Extra pagemiss latency. */
163 + (arb->bpp == 32 ? 8 : 4); /* Margin of error. */
164
165 extra_lat = xclks * 1000 * 1000 / mclk_freq;
166
167 if (arb->two_heads)
168 /* Account for another CRTC. */
169 extra_lat += fill_lat + extra_lat + burst_lat;
170
171 /* FIFO burst */
172
173 /* Max burst not leading to overflows. */
174 max_burst_o = (1 + fifo_len - extra_lat * drain_rate / (1000 * 1000))
175 * (fill_rate / 1000) / ((fill_rate - drain_rate) / 1000);
176 fifo->burst = min(max_burst_o, 1024);
177
178 /* Max burst value with an acceptable latency. */
179 max_burst_l = burst_lat * fill_rate / (1000 * 1000);
180 fifo->burst = min(max_burst_l, fifo->burst);
181
182 fifo->burst = rounddown_pow_of_two(fifo->burst);
183
184 /* FIFO low watermark */
185
186 min_lwm = (fill_lat + extra_lat) * drain_rate / (1000 * 1000) + 1;
187 max_lwm = fifo_len - fifo->burst
188 + fill_lat * drain_rate / (1000 * 1000)
189 + fifo->burst * drain_rate / fill_rate;
190
191 fifo->lwm = min_lwm + 10 * (max_lwm - min_lwm) / 100; /* Empirical. */
192}
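/*
 * Editor's worked example for the rates above (assumed figures, not
 * from any real board): a 128-bit bus at mclk = 200000 kHz fills at
 * 200000 * 128 / 8 = 3200000 kB/s, while a 32 bpp scanout at
 * pclk = 108000 kHz drains at 108000 * 32 / 8 = 432000 kB/s; the
 * watermark then only needs to cover drain_rate times the refill
 * latency, which is what min_lwm computes.
 */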
193
194static void
195nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
196 int *burst, int *lwm)
197{
198 struct drm_nouveau_private *dev_priv = dev->dev_private;
199 struct nv_fifo_info fifo_data;
200 struct nv_sim_state sim_data;
201 int MClk = nouveau_hw_get_clock(dev, MPLL);
202 int NVClk = nouveau_hw_get_clock(dev, NVPLL);
203 uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
204
205 sim_data.pclk_khz = VClk;
206 sim_data.mclk_khz = MClk;
207 sim_data.nvclk_khz = NVClk;
208 sim_data.bpp = bpp;
209 sim_data.two_heads = nv_two_heads(dev);
210 if ((dev->pci_device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ ||
211 (dev->pci_device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) {
212 uint32_t type;
213
214 pci_read_config_dword(pci_get_bus_and_slot(0, 1), 0x7c, &type);
215
216 sim_data.memory_type = (type >> 12) & 1;
217 sim_data.memory_width = 64;
218 sim_data.mem_latency = 3;
219 sim_data.mem_page_miss = 10;
220 } else {
221 sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
222 sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
223 sim_data.mem_latency = cfg1 & 0xf;
224 sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
225 }
226
227 if (dev_priv->card_type == NV_04)
228 nv04_calc_arb(&fifo_data, &sim_data);
229 else
230 nv10_calc_arb(&fifo_data, &sim_data);
231
232 *burst = ilog2(fifo_data.burst >> 4);
233 *lwm = fifo_data.lwm >> 3;
234}
235
236static void
237nv30_update_arb(int *burst, int *lwm)
238{
239 unsigned int fifo_size, burst_size, graphics_lwm;
240
241 fifo_size = 2048;
242 burst_size = 512;
243 graphics_lwm = fifo_size - burst_size;
244
245 *burst = ilog2(burst_size >> 5);
246 *lwm = graphics_lwm >> 3;
247}
248
249void
250nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm)
251{
252 struct drm_nouveau_private *dev_priv = dev->dev_private;
253
254 if (dev_priv->card_type < NV_30)
255 nv04_update_arb(dev, vclk, bpp, burst, lwm);
256 else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
257 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
258 *burst = 128;
259 *lwm = 0x0480;
260 } else
261 nv30_update_arb(burst, lwm);
262}
263
264static int
265getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
266 struct nouveau_pll_vals *bestpv)
267{
268 /* Find M, N and P for a single stage PLL
269 *
270 * Note that some bioses (NV3x) have lookup tables of precomputed MNP
271 * values, but we're too lazy to use those atm
272 *
273 * "clk" parameter in kHz
274 * returns calculated clock
275 */
276 struct drm_nouveau_private *dev_priv = dev->dev_private;
277 int cv = dev_priv->vbios->chip_version;
278 int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
279 int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
280 int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
281 int minU = pll_lim->vco1.min_inputfreq;
282 int maxU = pll_lim->vco1.max_inputfreq;
283 int minP = pll_lim->max_p ? pll_lim->min_p : 0;
284 int maxP = pll_lim->max_p ? pll_lim->max_p : pll_lim->max_usable_log2p;
285 int crystal = pll_lim->refclk;
286 int M, N, thisP, P;
287 int clkP, calcclk;
288 int delta, bestdelta = INT_MAX;
289 int bestclk = 0;
290
291 /* this division verified for nv20, nv18, nv28 (Haiku), and nv34 */
292 /* possibly correlated with introduction of 27MHz crystal */
293 if (dev_priv->card_type < NV_50) {
294 if (cv < 0x17 || cv == 0x1a || cv == 0x20) {
295 if (clk > 250000)
296 maxM = 6;
297 if (clk > 340000)
298 maxM = 2;
299 } else if (cv < 0x40) {
300 if (clk > 150000)
301 maxM = 6;
302 if (clk > 200000)
303 maxM = 4;
304 if (clk > 340000)
305 maxM = 2;
306 }
307 }
308
309 P = pll_lim->max_p ? maxP : (1 << maxP);
310 if ((clk * P) < minvco) {
311 minvco = clk * maxP;
312 maxvco = minvco * 2;
313 }
314
315 if (clk + clk/200 > maxvco) /* +0.5% */
316 maxvco = clk + clk/200;
317
318 /* NV34 goes maxlog2P->0, NV20 goes 0->maxlog2P */
319 for (thisP = minP; thisP <= maxP; thisP++) {
320 P = pll_lim->max_p ? thisP : (1 << thisP);
321 clkP = clk * P;
322
323 if (clkP < minvco)
324 continue;
325 if (clkP > maxvco)
326 return bestclk;
327
328 for (M = minM; M <= maxM; M++) {
329 if (crystal/M < minU)
330 return bestclk;
331 if (crystal/M > maxU)
332 continue;
333
334 /* add crystal/2 to round better */
335 N = (clkP * M + crystal/2) / crystal;
336
337 if (N < minN)
338 continue;
339 if (N > maxN)
340 break;
341
342 /* more rounding additions */
343 calcclk = ((N * crystal + P/2) / P + M/2) / M;
344 delta = abs(calcclk - clk);
345 /* we do an exhaustive search rather than terminating
346 * on an optimality condition...
347 */
348 if (delta < bestdelta) {
349 bestdelta = delta;
350 bestclk = calcclk;
351 bestpv->N1 = N;
352 bestpv->M1 = M;
353 bestpv->log2P = thisP;
354 if (delta == 0) /* except this one */
355 return bestclk;
356 }
357 }
358 }
359
360 return bestclk;
361}
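/*
 * Editor's note: the relation being searched above is the usual
 * single-stage PLL equation
 *
 *	clk_out = refclk * N / (M * P)
 *
 * where P is 1 << log2P on chips using log2p-style limits.  Assumed
 * example: a 27000 kHz crystal with N = 100, M = 3, P = 4 yields
 * 27000 * 100 / (3 * 4) = 225000 kHz.
 */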
362
363static int
364getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
365 struct nouveau_pll_vals *bestpv)
366{
367 /* Find M, N and P for a two stage PLL
368 *
369 * Note that some bioses (NV30+) have lookup tables of precomputed MNP
370 * values, but we're too lazy to use those atm
371 *
372 * "clk" parameter in kHz
373 * returns calculated clock
374 */
375 struct drm_nouveau_private *dev_priv = dev->dev_private;
376 int chip_version = dev_priv->vbios->chip_version;
377 int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
378 int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
379 int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
380 int maxU1 = pll_lim->vco1.max_inputfreq, maxU2 = pll_lim->vco2.max_inputfreq;
381 int minM1 = pll_lim->vco1.min_m, maxM1 = pll_lim->vco1.max_m;
382 int minN1 = pll_lim->vco1.min_n, maxN1 = pll_lim->vco1.max_n;
383 int minM2 = pll_lim->vco2.min_m, maxM2 = pll_lim->vco2.max_m;
384 int minN2 = pll_lim->vco2.min_n, maxN2 = pll_lim->vco2.max_n;
385 int maxlog2P = pll_lim->max_usable_log2p;
386 int crystal = pll_lim->refclk;
387 bool fixedgain2 = (minM2 == maxM2 && minN2 == maxN2);
388 int M1, N1, M2, N2, log2P;
389 int clkP, calcclk1, calcclk2, calcclkout;
390 int delta, bestdelta = INT_MAX;
391 int bestclk = 0;
392
393 int vco2 = (maxvco2 - maxvco2/200) / 2;
394 for (log2P = 0; clk && log2P < maxlog2P && clk <= (vco2 >> log2P); log2P++)
395 ;
396 clkP = clk << log2P;
397
398 if (maxvco2 < clk + clk/200) /* +0.5% */
399 maxvco2 = clk + clk/200;
400
401 for (M1 = minM1; M1 <= maxM1; M1++) {
402 if (crystal/M1 < minU1)
403 return bestclk;
404 if (crystal/M1 > maxU1)
405 continue;
406
407 for (N1 = minN1; N1 <= maxN1; N1++) {
408 calcclk1 = crystal * N1 / M1;
409 if (calcclk1 < minvco1)
410 continue;
411 if (calcclk1 > maxvco1)
412 break;
413
414 for (M2 = minM2; M2 <= maxM2; M2++) {
415 if (calcclk1/M2 < minU2)
416 break;
417 if (calcclk1/M2 > maxU2)
418 continue;
419
420 /* add calcclk1/2 to round better */
421 N2 = (clkP * M2 + calcclk1/2) / calcclk1;
422 if (N2 < minN2)
423 continue;
424 if (N2 > maxN2)
425 break;
426
427 if (!fixedgain2) {
428 if (chip_version < 0x60)
429 if (N2/M2 < 4 || N2/M2 > 10)
430 continue;
431
432 calcclk2 = calcclk1 * N2 / M2;
433 if (calcclk2 < minvco2)
434 break;
435 if (calcclk2 > maxvco2)
436 continue;
437 } else
438 calcclk2 = calcclk1;
439
440 calcclkout = calcclk2 >> log2P;
441 delta = abs(calcclkout - clk);
442 /* we do an exhaustive search rather than terminating
443 * on an optimality condition...
444 */
445 if (delta < bestdelta) {
446 bestdelta = delta;
447 bestclk = calcclkout;
448 bestpv->N1 = N1;
449 bestpv->M1 = M1;
450 bestpv->N2 = N2;
451 bestpv->M2 = M2;
452 bestpv->log2P = log2P;
453 if (delta == 0) /* except this one */
454 return bestclk;
455 }
456 }
457 }
458 }
459
460 return bestclk;
461}
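/*
 * Editor's note: the two-stage variant cascades the VCOs, matching
 * calcclk1, calcclk2 and calcclkout above:
 *
 *	clk_out = (refclk * N1 / M1) * N2 / M2 / (1 << log2P)
 *
 * Assumed example: with the same 27000 kHz crystal, N1/M1 = 50/3,
 * N2/M2 = 2/1 and log2P = 1 gives 27000 * 50 / 3 * 2 / 2 = 450000 kHz.
 */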
462
463int
464nouveau_calc_pll_mnp(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
465 struct nouveau_pll_vals *pv)
466{
467 int outclk;
468
469 if (!pll_lim->vco2.maxfreq)
470 outclk = getMNP_single(dev, pll_lim, clk, pv);
471 else
472 outclk = getMNP_double(dev, pll_lim, clk, pv);
473
474 if (!outclk)
475 NV_ERROR(dev, "Could not find a compatible set of PLL values\n");
476
477 return outclk;
478}
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
new file mode 100644
index 000000000000..9aaa972f8822
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -0,0 +1,468 @@
1/*
2 * Copyright 2005-2006 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drv.h"
28#include "nouveau_drm.h"
29#include "nouveau_dma.h"
30
31static int
32nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
33{
34 struct drm_device *dev = chan->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_bo *pb = chan->pushbuf_bo;
37 struct nouveau_gpuobj *pushbuf = NULL;
38 uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
39 int ret;
40
41 if (pb->bo.mem.mem_type == TTM_PL_TT) {
42 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
43 dev_priv->gart_info.aper_size,
44 NV_DMA_ACCESS_RO, &pushbuf,
45 NULL);
46 chan->pushbuf_base = start;
47 } else
48 if (dev_priv->card_type != NV_04) {
49 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
50 dev_priv->fb_available_size,
51 NV_DMA_ACCESS_RO,
52 NV_DMA_TARGET_VIDMEM, &pushbuf);
53 chan->pushbuf_base = start;
54 } else {
55 /* NV04 cmdbuf hack, from the original ddx... not sure of its
56 * exact reason for existing :) PCI access to cmdbuf in
57 * VRAM.
58 */
59 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
60 drm_get_resource_start(dev, 1),
61 dev_priv->fb_available_size,
62 NV_DMA_ACCESS_RO,
63 NV_DMA_TARGET_PCI, &pushbuf);
64 chan->pushbuf_base = start;
65 }
66
67 ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
68 if (ret) {
69 NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
70 if (pushbuf != dev_priv->gart_info.sg_ctxdma)
71 nouveau_gpuobj_del(dev, &pushbuf);
72 return ret;
73 }
74
75 return 0;
76}
77
78static struct nouveau_bo *
79nouveau_channel_user_pushbuf_alloc(struct drm_device *dev)
80{
81 struct nouveau_bo *pushbuf = NULL;
82 int location, ret;
83
84 if (nouveau_vram_pushbuf)
85 location = TTM_PL_FLAG_VRAM;
86 else
87 location = TTM_PL_FLAG_TT;
88
89 ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, false,
90 true, &pushbuf);
91 if (ret) {
92 NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret);
93 return NULL;
94 }
95
96 ret = nouveau_bo_pin(pushbuf, location);
97 if (ret) {
98 NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret);
99 nouveau_bo_ref(NULL, &pushbuf);
100 return NULL;
101 }
102
103 return pushbuf;
104}
105
106/* allocates and initializes a fifo for user space consumption */
107int
108nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
109 struct drm_file *file_priv,
110 uint32_t vram_handle, uint32_t tt_handle)
111{
112 struct drm_nouveau_private *dev_priv = dev->dev_private;
113 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
114 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
115 struct nouveau_channel *chan;
116 int channel, user;
117 int ret;
118
119 /*
120 * Alright, here is the full story
121 * Nvidia cards have multiple hw fifo contexts (praise them for that,
122 * no complicated crash-prone context switches)
123 * We allocate a new context for each app and let it write to it
124 * directly (woo, full userspace command submission !)
125 * When there are no more contexts, you lose
126 */
127 for (channel = 0; channel < pfifo->channels; channel++) {
128 if (dev_priv->fifos[channel] == NULL)
129 break;
130 }
131
132 /* no more fifos. you lose. */
133 if (channel == pfifo->channels)
134 return -EINVAL;
135
136 dev_priv->fifos[channel] = kzalloc(sizeof(struct nouveau_channel),
137 GFP_KERNEL);
138 if (!dev_priv->fifos[channel])
139 return -ENOMEM;
140 dev_priv->fifo_alloc_count++;
141 chan = dev_priv->fifos[channel];
142 INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
143 INIT_LIST_HEAD(&chan->fence.pending);
144 chan->dev = dev;
145 chan->id = channel;
146 chan->file_priv = file_priv;
147 chan->vram_handle = vram_handle;
148 chan->gart_handle = tt_handle;
149
150 NV_INFO(dev, "Allocating FIFO number %d\n", channel);
151
152 /* Allocate DMA push buffer */
153 chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev);
154 if (!chan->pushbuf_bo) {
155 ret = -ENOMEM;
156 NV_ERROR(dev, "pushbuf %d\n", ret);
157 nouveau_channel_free(chan);
158 return ret;
159 }
160
161 /* Locate channel's user control regs */
162 if (dev_priv->card_type < NV_40)
163 user = NV03_USER(channel);
164 else
165 if (dev_priv->card_type < NV_50)
166 user = NV40_USER(channel);
167 else
168 user = NV50_USER(channel);
169
170 chan->user = ioremap(pci_resource_start(dev->pdev, 0) + user,
171 PAGE_SIZE);
172 if (!chan->user) {
173 NV_ERROR(dev, "ioremap of regs failed.\n");
174 nouveau_channel_free(chan);
175 return -ENOMEM;
176 }
177 chan->user_put = 0x40;
178 chan->user_get = 0x44;
179
180 /* Allocate space for per-channel fixed notifier memory */
181 ret = nouveau_notifier_init_channel(chan);
182 if (ret) {
183 NV_ERROR(dev, "ntfy %d\n", ret);
184 nouveau_channel_free(chan);
185 return ret;
186 }
187
188 /* Setup channel's default objects */
189 ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
190 if (ret) {
191 NV_ERROR(dev, "gpuobj %d\n", ret);
192 nouveau_channel_free(chan);
193 return ret;
194 }
195
196 /* Create a dma object for the push buffer */
197 ret = nouveau_channel_pushbuf_ctxdma_init(chan);
198 if (ret) {
199 NV_ERROR(dev, "pbctxdma %d\n", ret);
200 nouveau_channel_free(chan);
201 return ret;
202 }
203
204 /* disable the fifo caches */
205 pfifo->reassign(dev, false);
206
207 /* Create a graphics context for new channel */
208 ret = pgraph->create_context(chan);
209 if (ret) {
210 nouveau_channel_free(chan);
211 return ret;
212 }
213
214 /* Construct initial RAMFC for the new channel */
215 ret = pfifo->create_context(chan);
216 if (ret) {
217 nouveau_channel_free(chan);
218 return ret;
219 }
220
221 pfifo->reassign(dev, true);
222
223 ret = nouveau_dma_init(chan);
224 if (!ret)
225 ret = nouveau_fence_init(chan);
226 if (ret) {
227 nouveau_channel_free(chan);
228 return ret;
229 }
230
231 nouveau_debugfs_channel_init(chan);
232
233 NV_INFO(dev, "%s: initialised FIFO %d\n", __func__, channel);
234 *chan_ret = chan;
235 return 0;
236}
237
238int
239nouveau_channel_idle(struct nouveau_channel *chan)
240{
241 struct drm_device *dev = chan->dev;
242 struct drm_nouveau_private *dev_priv = dev->dev_private;
243 struct nouveau_engine *engine = &dev_priv->engine;
244 uint32_t caches;
245 int idle;
246
247 if (!chan) {
248 NV_ERROR(dev, "no channel...\n");
249 return 1;
250 }
251
252 caches = nv_rd32(dev, NV03_PFIFO_CACHES);
253 nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
254
255 if (engine->fifo.channel_id(dev) != chan->id) {
256 struct nouveau_gpuobj *ramfc =
257 chan->ramfc ? chan->ramfc->gpuobj : NULL;
258
259 if (!ramfc) {
260 NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
261 return 1;
262 }
263
264 engine->instmem.prepare_access(dev, false);
265 if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
266 idle = 0;
267 else
268 idle = 1;
269 engine->instmem.finish_access(dev);
270 } else {
271 idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
272 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
273 }
274
275 nv_wr32(dev, NV03_PFIFO_CACHES, caches);
276 return idle;
277}
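/*
 * Editor's note: "idle" above means the channel's fetch pointer has
 * caught up with its submission pointer (GET == PUT), read either from
 * the live CACHE1 DMA_GET/PUT registers when the channel is resident
 * on the GPU, or from what appear to be the corresponding words of its
 * saved RAMFC image when it has been swapped out.
 */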
278
279/* stops a fifo */
280void
281nouveau_channel_free(struct nouveau_channel *chan)
282{
283 struct drm_device *dev = chan->dev;
284 struct drm_nouveau_private *dev_priv = dev->dev_private;
285 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
286 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
287 unsigned long flags;
288 int ret;
289
290 NV_INFO(dev, "%s: freeing fifo %d\n", __func__, chan->id);
291
292 nouveau_debugfs_channel_fini(chan);
293
294 /* Give outstanding push buffers a chance to complete */
295 spin_lock_irqsave(&chan->fence.lock, flags);
296 nouveau_fence_update(chan);
297 spin_unlock_irqrestore(&chan->fence.lock, flags);
298 if (chan->fence.sequence != chan->fence.sequence_ack) {
299 struct nouveau_fence *fence = NULL;
300
301 ret = nouveau_fence_new(chan, &fence, true);
302 if (ret == 0) {
303 ret = nouveau_fence_wait(fence, NULL, false, false);
304 nouveau_fence_unref((void *)&fence);
305 }
306
307 if (ret)
308 NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id);
309 }
310
311 /* Ensure all outstanding fences are signaled. They should be if the
312 * above attempts at idling were OK, but if we failed this'll tell TTM
313 * we're done with the buffers.
314 */
315 nouveau_fence_fini(chan);
316
317 /* Ensure the channel is no longer active on the GPU */
318 pfifo->reassign(dev, false);
319
320 if (pgraph->channel(dev) == chan) {
321 pgraph->fifo_access(dev, false);
322 pgraph->unload_context(dev);
323 pgraph->fifo_access(dev, true);
324 }
325 pgraph->destroy_context(chan);
326
327 if (pfifo->channel_id(dev) == chan->id) {
328 pfifo->disable(dev);
329 pfifo->unload_context(dev);
330 pfifo->enable(dev);
331 }
332 pfifo->destroy_context(chan);
333
334 pfifo->reassign(dev, true);
335
336 /* Release the channel's resources */
337 nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
338 if (chan->pushbuf_bo) {
339 nouveau_bo_unpin(chan->pushbuf_bo);
340 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
341 }
342 nouveau_gpuobj_channel_takedown(chan);
343 nouveau_notifier_takedown_channel(chan);
344 if (chan->user)
345 iounmap(chan->user);
346
347 dev_priv->fifos[chan->id] = NULL;
348 dev_priv->fifo_alloc_count--;
349 kfree(chan);
350}
351
352/* cleans up all the fifos from file_priv */
353void
354nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv)
355{
356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 struct nouveau_engine *engine = &dev_priv->engine;
358 int i;
359
360 NV_DEBUG(dev, "clearing FIFO enables from file_priv\n");
361 for (i = 0; i < engine->fifo.channels; i++) {
362 struct nouveau_channel *chan = dev_priv->fifos[i];
363
364 if (chan && chan->file_priv == file_priv)
365 nouveau_channel_free(chan);
366 }
367}
368
369int
370nouveau_channel_owner(struct drm_device *dev, struct drm_file *file_priv,
371 int channel)
372{
373 struct drm_nouveau_private *dev_priv = dev->dev_private;
374 struct nouveau_engine *engine = &dev_priv->engine;
375
376 if (channel >= engine->fifo.channels)
377 return 0;
378 if (dev_priv->fifos[channel] == NULL)
379 return 0;
380
381 return (dev_priv->fifos[channel]->file_priv == file_priv);
382}
383
384/***********************************
385 * ioctls wrapping the functions
386 ***********************************/
387
388static int
389nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
390 struct drm_file *file_priv)
391{
392 struct drm_nouveau_private *dev_priv = dev->dev_private;
393 struct drm_nouveau_channel_alloc *init = data;
394 struct nouveau_channel *chan;
395 int ret;
396
397 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
398
399 if (dev_priv->engine.graph.accel_blocked)
400 return -ENODEV;
401
402 if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
403 return -EINVAL;
404
405 ret = nouveau_channel_alloc(dev, &chan, file_priv,
406 init->fb_ctxdma_handle,
407 init->tt_ctxdma_handle);
408 if (ret)
409 return ret;
410 init->channel = chan->id;
411
412 init->subchan[0].handle = NvM2MF;
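/* 0x0039 and 0x5039 are the pre-NV50 and NV50 M2MF object classes
 * (NV_MEMORY_TO_MEMORY_FORMAT and NV50_MEMORY_TO_MEMORY_FORMAT in
 * nouveau_dma.h); nouveau_dma_init() creates the matching object.
 */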
413 if (dev_priv->card_type < NV_50)
414 init->subchan[0].grclass = 0x0039;
415 else
416 init->subchan[0].grclass = 0x5039;
417 init->nr_subchan = 1;
418
419 /* Named memory object area */
420 ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
421 &init->notifier_handle);
422 if (ret) {
423 nouveau_channel_free(chan);
424 return ret;
425 }
426
427 return 0;
428}
429
430static int
431nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
432 struct drm_file *file_priv)
433{
434 struct drm_nouveau_channel_free *cfree = data;
435 struct nouveau_channel *chan;
436
437 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
438 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
439
440 nouveau_channel_free(chan);
441 return 0;
442}
443
444/***********************************
445 * finally, the ioctl table
446 ***********************************/
447
448struct drm_ioctl_desc nouveau_ioctls[] = {
449 DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
450 DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
451 DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
452 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
453 DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
454 DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
455 DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
456 DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
457 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
458 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
459 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
460 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
461 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
462 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
463 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
464 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
465 DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
466};
467
468int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
new file mode 100644
index 000000000000..032cf098fa1c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -0,0 +1,824 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_edid.h"
29#include "drm_crtc_helper.h"
30#include "nouveau_reg.h"
31#include "nouveau_drv.h"
32#include "nouveau_encoder.h"
33#include "nouveau_crtc.h"
34#include "nouveau_connector.h"
35#include "nouveau_hw.h"
36
37static inline struct drm_encoder_slave_funcs *
38get_slave_funcs(struct nouveau_encoder *enc)
39{
40 return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
41}
42
43static struct nouveau_encoder *
44find_encoder_by_type(struct drm_connector *connector, int type)
45{
46 struct drm_device *dev = connector->dev;
47 struct nouveau_encoder *nv_encoder;
48 struct drm_mode_object *obj;
49 int i, id;
50
51 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
52 id = connector->encoder_ids[i];
53 if (!id)
54 break;
55
56 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
57 if (!obj)
58 continue;
59 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
60
61 if (type == OUTPUT_ANY || nv_encoder->dcb->type == type)
62 return nv_encoder;
63 }
64
65 return NULL;
66}
67
68struct nouveau_connector *
69nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
70{
71 struct drm_device *dev = to_drm_encoder(encoder)->dev;
72 struct drm_connector *drm_connector;
73
74 list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
75 if (drm_connector->encoder == to_drm_encoder(encoder))
76 return nouveau_connector(drm_connector);
77 }
78
79 return NULL;
80}
81
82
83static void
84nouveau_connector_destroy(struct drm_connector *drm_connector)
85{
86 struct nouveau_connector *connector = nouveau_connector(drm_connector);
87 struct drm_device *dev;
88
89 if (!connector)
90 return;
91 dev = connector->base.dev;
92 NV_DEBUG(dev, "\n");
93
94 drm_sysfs_connector_remove(drm_connector);
95 drm_connector_cleanup(drm_connector);
96 kfree(drm_connector);
97}
98
99static void
100nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
101{
102 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
103
104 if (dev_priv->card_type >= NV_50)
105 return;
106
107 *flags = 0;
108 if (NVLockVgaCrtcs(dev_priv->dev, false))
109 *flags |= 1;
110 if (nv_heads_tied(dev_priv->dev))
111 *flags |= 2;
112
113 if (*flags & 2)
114 NVSetOwner(dev_priv->dev, 0); /* necessary? */
115}
116
117static void
118nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
119{
120 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
121
122 if (dev_priv->card_type >= NV_50)
123 return;
124
125 if (flags & 2)
126 NVSetOwner(dev_priv->dev, 4);
127 if (flags & 1)
128 NVLockVgaCrtcs(dev_priv->dev, true);
129}
130
131static struct nouveau_i2c_chan *
132nouveau_connector_ddc_detect(struct drm_connector *connector,
133 struct nouveau_encoder **pnv_encoder)
134{
135 struct drm_device *dev = connector->dev;
136 uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
137 int ret, flags, i;
138
139 struct i2c_msg msgs[] = {
140 {
141 .addr = 0x50,
142 .flags = 0,
143 .len = 1,
144 .buf = out_buf,
145 },
146 {
147 .addr = 0x50,
148 .flags = I2C_M_RD,
149 .len = 1,
150 .buf = buf,
151 }
152 };
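/* A one-byte write of offset 0 followed by a one-byte read at i2c
 * address 0x50 (the standard EDID slave address) acts as a cheap DDC
 * presence probe; i2c_transfer() returning 2 means both messages
 * completed, i.e. something acknowledged on the bus.
 */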
153
154 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
155 struct nouveau_i2c_chan *i2c = NULL;
156 struct nouveau_encoder *nv_encoder;
157 struct drm_mode_object *obj;
158 int id;
159
160 id = connector->encoder_ids[i];
161 if (!id)
162 break;
163
164 obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
165 if (!obj)
166 continue;
167 nv_encoder = nouveau_encoder(obj_to_encoder(obj));
168
169 if (nv_encoder->dcb->i2c_index < 0xf)
170 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
171 if (!i2c)
172 continue;
173
174 nouveau_connector_ddc_prepare(connector, &flags);
175 ret = i2c_transfer(&i2c->adapter, msgs, 2);
176 nouveau_connector_ddc_finish(connector, flags);
177
178 if (ret == 2) {
179 *pnv_encoder = nv_encoder;
180 return i2c;
181 }
182 }
183
184 return NULL;
185}
186
187static void
188nouveau_connector_set_encoder(struct drm_connector *connector,
189 struct nouveau_encoder *nv_encoder)
190{
191 struct nouveau_connector *nv_connector = nouveau_connector(connector);
192 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
193 struct drm_device *dev = connector->dev;
194
195 if (nv_connector->detected_encoder == nv_encoder)
196 return;
197 nv_connector->detected_encoder = nv_encoder;
198
199 if (nv_encoder->dcb->type == OUTPUT_LVDS ||
200 nv_encoder->dcb->type == OUTPUT_TMDS) {
201 connector->doublescan_allowed = false;
202 connector->interlace_allowed = false;
203 } else {
204 connector->doublescan_allowed = true;
205 if (dev_priv->card_type == NV_20 ||
206 (dev_priv->card_type == NV_10 &&
207 (dev->pci_device & 0x0ff0) != 0x0100 &&
208 (dev->pci_device & 0x0ff0) != 0x0150))
209 /* HW is broken */
210 connector->interlace_allowed = false;
211 else
212 connector->interlace_allowed = true;
213 }
214
215 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
216 drm_connector_property_set_value(connector,
217 dev->mode_config.dvi_i_subconnector_property,
218 nv_encoder->dcb->type == OUTPUT_TMDS ?
219 DRM_MODE_SUBCONNECTOR_DVID :
220 DRM_MODE_SUBCONNECTOR_DVIA);
221 }
222}
223
224static enum drm_connector_status
225nouveau_connector_detect(struct drm_connector *connector)
226{
227 struct drm_device *dev = connector->dev;
228 struct nouveau_connector *nv_connector = nouveau_connector(connector);
229 struct nouveau_encoder *nv_encoder = NULL;
230 struct nouveau_i2c_chan *i2c;
231 int type, flags;
232
233 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
234 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
235 if (nv_encoder && nv_connector->native_mode) {
236 nouveau_connector_set_encoder(connector, nv_encoder);
237 return connector_status_connected;
238 }
239
240 i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
241 if (i2c) {
242 nouveau_connector_ddc_prepare(connector, &flags);
243 nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
244 nouveau_connector_ddc_finish(connector, flags);
245 drm_mode_connector_update_edid_property(connector,
246 nv_connector->edid);
247 if (!nv_connector->edid) {
248 NV_ERROR(dev, "DDC responded, but no EDID for %s\n",
249 drm_get_connector_name(connector));
250 return connector_status_disconnected;
251 }
252
253 if (nv_encoder->dcb->type == OUTPUT_DP &&
254 !nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
255 NV_ERROR(dev, "Detected %s, but failed init\n",
256 drm_get_connector_name(connector));
257 return connector_status_disconnected;
258 }
259
260 /* Override encoder type for DVI-I based on whether EDID
261 * says the display is digital or analog; both use the
262 * same i2c channel, so the value returned from ddc_detect
263 * isn't necessarily correct.
264 */
265 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
266 if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
267 type = OUTPUT_TMDS;
268 else
269 type = OUTPUT_ANALOG;
270
271 nv_encoder = find_encoder_by_type(connector, type);
272 if (!nv_encoder) {
273 NV_ERROR(dev, "Detected %d encoder on %s, "
274 "but no object!\n", type,
275 drm_get_connector_name(connector));
276 return connector_status_disconnected;
277 }
278 }
279
280 nouveau_connector_set_encoder(connector, nv_encoder);
281 return connector_status_connected;
282 }
283
284 nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
285 if (!nv_encoder)
286 nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
287 if (nv_encoder) {
288 struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
289 struct drm_encoder_helper_funcs *helper =
290 encoder->helper_private;
291
292 if (helper->detect(encoder, connector) ==
293 connector_status_connected) {
294 nouveau_connector_set_encoder(connector, nv_encoder);
295 return connector_status_connected;
296 }
297
298 }
299
300 return connector_status_disconnected;
301}
302
303static void
304nouveau_connector_force(struct drm_connector *connector)
305{
306 struct drm_device *dev = connector->dev;
307 struct nouveau_encoder *nv_encoder;
308 int type;
309
310 if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
311 if (connector->force == DRM_FORCE_ON_DIGITAL)
312 type = OUTPUT_TMDS;
313 else
314 type = OUTPUT_ANALOG;
315 } else
316 type = OUTPUT_ANY;
317
318 nv_encoder = find_encoder_by_type(connector, type);
319 if (!nv_encoder) {
320 NV_ERROR(dev, "can't find encoder to force %s on!\n",
321 drm_get_connector_name(connector));
322 connector->status = connector_status_disconnected;
323 return;
324 }
325
326 nouveau_connector_set_encoder(connector, nv_encoder);
327}
328
329static int
330nouveau_connector_set_property(struct drm_connector *connector,
331 struct drm_property *property, uint64_t value)
332{
333 struct nouveau_connector *nv_connector = nouveau_connector(connector);
334 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
335 struct drm_device *dev = connector->dev;
336 int ret;
337
338 /* Scaling mode */
339 if (property == dev->mode_config.scaling_mode_property) {
340 struct nouveau_crtc *nv_crtc = NULL;
341 bool modeset = false;
342
343 switch (value) {
344 case DRM_MODE_SCALE_NONE:
345 case DRM_MODE_SCALE_FULLSCREEN:
346 case DRM_MODE_SCALE_CENTER:
347 case DRM_MODE_SCALE_ASPECT:
348 break;
349 default:
350 return -EINVAL;
351 }
352
353 /* LVDS always needs gpu scaling */
354 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
355 value == DRM_MODE_SCALE_NONE)
356 return -EINVAL;
357
358 /* Changing between GPU and panel scaling requires a full
359 * modeset
360 */
361 if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
362 (value == DRM_MODE_SCALE_NONE))
363 modeset = true;
364 nv_connector->scaling_mode = value;
365
366 if (connector->encoder && connector->encoder->crtc)
367 nv_crtc = nouveau_crtc(connector->encoder->crtc);
368 if (!nv_crtc)
369 return 0;
370
371 if (modeset || !nv_crtc->set_scale) {
372 ret = drm_crtc_helper_set_mode(&nv_crtc->base,
373 &nv_crtc->base.mode,
374 nv_crtc->base.x,
375 nv_crtc->base.y, NULL);
376 if (!ret)
377 return -EINVAL;
378 } else {
379 ret = nv_crtc->set_scale(nv_crtc, value, true);
380 if (ret)
381 return ret;
382 }
383
384 return 0;
385 }
386
387 /* Dithering */
388 if (property == dev->mode_config.dithering_mode_property) {
389 struct nouveau_crtc *nv_crtc = NULL;
390
391 if (value == DRM_MODE_DITHERING_ON)
392 nv_connector->use_dithering = true;
393 else
394 nv_connector->use_dithering = false;
395
396 if (connector->encoder && connector->encoder->crtc)
397 nv_crtc = nouveau_crtc(connector->encoder->crtc);
398
399 if (!nv_crtc || !nv_crtc->set_dither)
400 return 0;
401
402 return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
403 true);
404 }
405
406 if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
407 return get_slave_funcs(nv_encoder)->
408 set_property(to_drm_encoder(nv_encoder), connector, property, value);
409
410 return -EINVAL;
411}
412
413static struct drm_display_mode *
414nouveau_connector_native_mode(struct nouveau_connector *connector)
415{
416 struct drm_device *dev = connector->base.dev;
417 struct drm_display_mode *mode, *largest = NULL;
418 int high_w = 0, high_h = 0, high_v = 0;
419
420 /* Use preferred mode if there is one. */
421 list_for_each_entry(mode, &connector->base.probed_modes, head) {
422 if (mode->type & DRM_MODE_TYPE_PREFERRED) {
423 NV_DEBUG(dev, "native mode from preferred\n");
424 return drm_mode_duplicate(dev, mode);
425 }
426 }
427
428 /* Otherwise, take the resolution with the largest width, then height,
429 * then vertical refresh
430 */
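/* Illustrative: given 1280x1024@60, 1280x960@85 and 1024x768@75 in
 * probed_modes, 1280x1024@60 wins; width is compared first, then
 * height, then refresh.
 */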
431 list_for_each_entry(mode, &connector->base.probed_modes, head) {
432 if (mode->hdisplay < high_w)
433 continue;
434
435 if (mode->hdisplay == high_w && mode->vdisplay < high_h)
436 continue;
437
438 if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
439 mode->vrefresh < high_v)
440 continue;
441
442 high_w = mode->hdisplay;
443 high_h = mode->vdisplay;
444 high_v = mode->vrefresh;
445 largest = mode;
446 }
447
448 NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
449 high_w, high_h, high_v);
450 return largest ? drm_mode_duplicate(dev, largest) : NULL;
451}
452
453struct moderec {
454 int hdisplay;
455 int vdisplay;
456};
457
458static struct moderec scaler_modes[] = {
459 { 1920, 1200 },
460 { 1920, 1080 },
461 { 1680, 1050 },
462 { 1600, 1200 },
463 { 1400, 1050 },
464 { 1280, 1024 },
465 { 1280, 960 },
466 { 1152, 864 },
467 { 1024, 768 },
468 { 800, 600 },
469 { 720, 400 },
470 { 640, 480 },
471 { 640, 400 },
472 { 640, 350 },
473 {}
474};
475
476static int
477nouveau_connector_scaler_modes_add(struct drm_connector *connector)
478{
479 struct nouveau_connector *nv_connector = nouveau_connector(connector);
480 struct drm_display_mode *native = nv_connector->native_mode, *m;
481 struct drm_device *dev = connector->dev;
482 struct moderec *mode = &scaler_modes[0];
483 int modes = 0;
484
485 if (!native)
486 return 0;
487
488 while (mode->hdisplay) {
489 if (mode->hdisplay <= native->hdisplay &&
490 mode->vdisplay <= native->vdisplay) {
491 m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
492 drm_mode_vrefresh(native), false,
493 false, false);
494 if (!m)
495 break; /* allocation failed; "continue" would skip mode++ and loop forever */
496
497 m->type |= DRM_MODE_TYPE_DRIVER;
498
499 drm_mode_probed_add(connector, m);
500 modes++;
501 }
502
503 mode++;
504 }
505
506 return modes;
507}
508
509static int
510nouveau_connector_get_modes(struct drm_connector *connector)
511{
512 struct drm_device *dev = connector->dev;
513 struct nouveau_connector *nv_connector = nouveau_connector(connector);
514 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
515 int ret = 0;
516
517 /* If we're not LVDS, destroy the previous native mode, the attached
518 * monitor could have changed.
519 */
520 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
521 nv_connector->native_mode) {
522 drm_mode_destroy(dev, nv_connector->native_mode);
523 nv_connector->native_mode = NULL;
524 }
525
526 if (nv_connector->edid)
527 ret = drm_add_edid_modes(connector, nv_connector->edid);
528
529 /* Find the native mode if this is a digital panel, if we didn't
530 * find any modes through DDC previously add the native mode to
531 * the list of modes.
532 */
533 if (!nv_connector->native_mode)
534 nv_connector->native_mode =
535 nouveau_connector_native_mode(nv_connector);
536 if (ret == 0 && nv_connector->native_mode) {
537 struct drm_display_mode *mode;
538
539 mode = drm_mode_duplicate(dev, nv_connector->native_mode);
540 drm_mode_probed_add(connector, mode);
541 ret = 1;
542 }
543
544 if (nv_encoder->dcb->type == OUTPUT_TV)
545 ret = get_slave_funcs(nv_encoder)->
546 get_modes(to_drm_encoder(nv_encoder), connector);
547
548 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
549 ret += nouveau_connector_scaler_modes_add(connector);
550
551 return ret;
552}
553
554static int
555nouveau_connector_mode_valid(struct drm_connector *connector,
556 struct drm_display_mode *mode)
557{
558 struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
559 struct nouveau_connector *nv_connector = nouveau_connector(connector);
560 struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
561 unsigned min_clock = 25000, max_clock = min_clock;
562 unsigned clock = mode->clock;
563
564 switch (nv_encoder->dcb->type) {
565 case OUTPUT_LVDS:
566 BUG_ON(!nv_connector->native_mode);
567 if (mode->hdisplay > nv_connector->native_mode->hdisplay ||
568 mode->vdisplay > nv_connector->native_mode->vdisplay)
569 return MODE_PANEL;
570
571 min_clock = 0;
572 max_clock = 400000;
573 break;
574 case OUTPUT_TMDS:
575 if ((dev_priv->card_type >= NV_50 && !nouveau_duallink) ||
576 (dev_priv->card_type < NV_50 &&
577 !nv_encoder->dcb->duallink_possible))
578 max_clock = 165000;
579 else
580 max_clock = 330000;
581 break;
582 case OUTPUT_ANALOG:
583 max_clock = nv_encoder->dcb->crtconf.maxfreq;
584 if (!max_clock)
585 max_clock = 350000;
586 break;
587 case OUTPUT_TV:
588 return get_slave_funcs(nv_encoder)->
589 mode_valid(to_drm_encoder(nv_encoder), mode);
590 case OUTPUT_DP:
591 if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
592 max_clock = nv_encoder->dp.link_nr * 270000;
593 else
594 max_clock = nv_encoder->dp.link_nr * 162000;
595
596 clock *= 3;
597 break;
598 }
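/* For DP the comparison is effectively in kB/s: pixel clock (kHz)
 * times 3 bytes per pixel (24bpp assumed) against per-lane bandwidth,
 * where a 2.7 Gbit/s lane carries 270000 kB/s after 8b/10b encoding,
 * and a 1.62 Gbit/s lane 162000.
 */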
599
600 if (clock < min_clock)
601 return MODE_CLOCK_LOW;
602
603 if (clock > max_clock)
604 return MODE_CLOCK_HIGH;
605
606 return MODE_OK;
607}
608
609static struct drm_encoder *
610nouveau_connector_best_encoder(struct drm_connector *connector)
611{
612 struct nouveau_connector *nv_connector = nouveau_connector(connector);
613
614 if (nv_connector->detected_encoder)
615 return to_drm_encoder(nv_connector->detected_encoder);
616
617 return NULL;
618}
619
620static const struct drm_connector_helper_funcs
621nouveau_connector_helper_funcs = {
622 .get_modes = nouveau_connector_get_modes,
623 .mode_valid = nouveau_connector_mode_valid,
624 .best_encoder = nouveau_connector_best_encoder,
625};
626
627static const struct drm_connector_funcs
628nouveau_connector_funcs = {
629 .dpms = drm_helper_connector_dpms,
630 .save = NULL,
631 .restore = NULL,
632 .detect = nouveau_connector_detect,
633 .destroy = nouveau_connector_destroy,
634 .fill_modes = drm_helper_probe_single_connector_modes,
635 .set_property = nouveau_connector_set_property,
636 .force = nouveau_connector_force
637};
638
639static int
640nouveau_connector_create_lvds(struct drm_device *dev,
641 struct drm_connector *connector)
642{
643 struct nouveau_connector *nv_connector = nouveau_connector(connector);
644 struct drm_nouveau_private *dev_priv = dev->dev_private;
645 struct nouveau_i2c_chan *i2c = NULL;
646 struct nouveau_encoder *nv_encoder;
647 struct drm_display_mode native, *mode, *temp;
648 bool dummy, if_is_24bit = false;
649 int ret, flags;
650
651 nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
652 if (!nv_encoder)
653 return -ENODEV;
654
655 ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
656 if (ret) {
657 NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
658 return ret;
659 }
660 nv_connector->use_dithering = !if_is_24bit;
661
662 /* First, try getting an EDID over DDC, if allowed and an I2C
663 * channel is available.
664 */
665 if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
666 i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
667
668 if (i2c) {
669 nouveau_connector_ddc_prepare(connector, &flags);
670 nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
671 nouveau_connector_ddc_finish(connector, flags);
672 }
673
674 /* If no EDID found above, and the VBIOS indicates a hardcoded
675 * modeline is available for the panel, set it as the panel's
676 * native mode and exit.
677 */
678 if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
679 (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
680 dev_priv->VBIOS.pub.fp_no_ddc)) {
681 nv_connector->native_mode = drm_mode_duplicate(dev, &native);
682 goto out;
683 }
684
685 /* Still nothing; some VBIOS images have a hardcoded EDID block
686 * for the panel stored in them.
687 */
688 if (!nv_connector->edid && !nv_connector->native_mode &&
689 !dev_priv->VBIOS.pub.fp_no_ddc) {
690 nv_connector->edid =
691 (struct edid *)nouveau_bios_embedded_edid(dev);
692 }
693
694 if (!nv_connector->edid)
695 goto out;
696
697 /* We didn't find/use a panel mode from the VBIOS, so parse the EDID
698 * block and look for the preferred mode there.
699 */
700 ret = drm_add_edid_modes(connector, nv_connector->edid);
701 if (ret == 0)
702 goto out;
703 nv_connector->detected_encoder = nv_encoder;
704 nv_connector->native_mode = nouveau_connector_native_mode(nv_connector);
705 list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
706 drm_mode_remove(connector, mode);
707
708out:
709 if (!nv_connector->native_mode) {
710 NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
711 "determine its native mode. Disabling.\n");
712 return -ENODEV;
713 }
714
715 drm_mode_connector_update_edid_property(connector, nv_connector->edid);
716 return 0;
717}
718
719int
720nouveau_connector_create(struct drm_device *dev, int index, int type)
721{
722 struct drm_nouveau_private *dev_priv = dev->dev_private;
723 struct nouveau_connector *nv_connector = NULL;
724 struct drm_connector *connector;
725 struct drm_encoder *encoder;
726 int ret;
727
728 NV_DEBUG(dev, "\n");
729
730 nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
731 if (!nv_connector)
732 return -ENOMEM;
733 nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
734 connector = &nv_connector->base;
735
736 switch (type) {
737 case DRM_MODE_CONNECTOR_VGA:
738 NV_INFO(dev, "Detected a VGA connector\n");
739 break;
740 case DRM_MODE_CONNECTOR_DVID:
741 NV_INFO(dev, "Detected a DVI-D connector\n");
742 break;
743 case DRM_MODE_CONNECTOR_DVII:
744 NV_INFO(dev, "Detected a DVI-I connector\n");
745 break;
746 case DRM_MODE_CONNECTOR_LVDS:
747 NV_INFO(dev, "Detected an LVDS connector\n");
748 break;
749 case DRM_MODE_CONNECTOR_TV:
750 NV_INFO(dev, "Detected a TV connector\n");
751 break;
752 case DRM_MODE_CONNECTOR_DisplayPort:
753 NV_INFO(dev, "Detected a DisplayPort connector\n");
754 break;
755 default:
756 NV_ERROR(dev, "Unknown connector, this is not good.\n");
757 break;
758 }
759
760 /* defaults, will get overridden in detect() */
761 connector->interlace_allowed = false;
762 connector->doublescan_allowed = false;
763
764 drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
765 drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
766
767 /* Init DVI-I specific properties */
768 if (type == DRM_MODE_CONNECTOR_DVII) {
769 drm_mode_create_dvi_i_properties(dev);
770 drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
771 drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
772 }
773
774 if (type != DRM_MODE_CONNECTOR_LVDS)
775 nv_connector->use_dithering = false;
776
777 if (type == DRM_MODE_CONNECTOR_DVID ||
778 type == DRM_MODE_CONNECTOR_DVII ||
779 type == DRM_MODE_CONNECTOR_LVDS ||
780 type == DRM_MODE_CONNECTOR_DisplayPort) {
781 nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
782
783 drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
784 nv_connector->scaling_mode);
785 drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
786 nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
787 : DRM_MODE_DITHERING_OFF);
788
789 } else {
790 nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
791
792 if (type == DRM_MODE_CONNECTOR_VGA &&
793 dev_priv->card_type >= NV_50) {
794 drm_connector_attach_property(connector,
795 dev->mode_config.scaling_mode_property,
796 nv_connector->scaling_mode);
797 }
798 }
799
800 /* attach encoders */
801 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
802 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
803
804 if (nv_encoder->dcb->connector != index)
805 continue;
806
807 if (get_slave_funcs(nv_encoder))
808 get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
809
810 drm_mode_connector_attach_encoder(connector, encoder);
811 }
812
813 drm_sysfs_connector_add(connector);
814
815 if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
816 ret = nouveau_connector_create_lvds(dev, connector);
817 if (ret) {
818 connector->funcs->destroy(connector);
819 return ret;
820 }
821 }
822
823 return 0;
824}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
new file mode 100644
index 000000000000..728b8090e5ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_CONNECTOR_H__
28#define __NOUVEAU_CONNECTOR_H__
29
30#include "drm_edid.h"
31#include "nouveau_i2c.h"
32
33struct nouveau_connector {
34 struct drm_connector base;
35
36 struct dcb_connector_table_entry *dcb;
37
38 int scaling_mode;
39 bool use_dithering;
40
41 struct nouveau_encoder *detected_encoder;
42 struct edid *edid;
43 struct drm_display_mode *native_mode;
44};
45
46static inline struct nouveau_connector *nouveau_connector(
47 struct drm_connector *con)
48{
49 return container_of(con, struct nouveau_connector, base);
50}
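/* container_of() recovers the wrapping nouveau_connector from the
 * embedded drm_connector, the usual kernel subclassing idiom; only
 * valid for connectors this driver created.
 */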
51
52int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
53
54#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
new file mode 100644
index 000000000000..49fa7b2d257e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_CRTC_H__
28#define __NOUVEAU_CRTC_H__
29
30struct nouveau_crtc {
31 struct drm_crtc base;
32
33 int index;
34
35 struct drm_display_mode *mode;
36
37 uint32_t dpms_saved_fp_control;
38 uint32_t fp_users;
39 int saturation;
40 int sharpness;
41 int last_dpms;
42
43 struct {
44 int cpp;
45 bool blanked;
46 uint32_t offset;
47 uint32_t tile_flags;
48 } fb;
49
50 struct {
51 struct nouveau_bo *nvbo;
52 bool visible;
53 uint32_t offset;
54 void (*set_offset)(struct nouveau_crtc *, uint32_t offset);
55 void (*set_pos)(struct nouveau_crtc *, int x, int y);
56 void (*hide)(struct nouveau_crtc *, bool update);
57 void (*show)(struct nouveau_crtc *, bool update);
58 } cursor;
59
60 struct {
61 struct nouveau_bo *nvbo;
62 uint16_t r[256];
63 uint16_t g[256];
64 uint16_t b[256];
65 int depth;
66 } lut;
67
68 int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
69 int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
70};
71
72static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
73{
74 return container_of(crtc, struct nouveau_crtc, base);
75}
76
77static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
78{
79 return &crtc->base;
80}
81
82int nv50_crtc_create(struct drm_device *dev, int index);
83int nv50_cursor_init(struct nouveau_crtc *);
84void nv50_cursor_fini(struct nouveau_crtc *);
85int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
86 uint32_t buffer_handle, uint32_t width,
87 uint32_t height);
88int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
89
90int nv04_cursor_init(struct nouveau_crtc *);
91
92struct nouveau_connector *
93nouveau_crtc_connector_get(struct nouveau_crtc *crtc);
94
95#endif /* __NOUVEAU_CRTC_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
new file mode 100644
index 000000000000..d79db3698f16
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -0,0 +1,155 @@
1/*
2 * Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial
14 * portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
20 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26/*
27 * Authors:
28 * Ben Skeggs <bskeggs@redhat.com>
29 */
30
31#include <linux/debugfs.h>
32
33#include "drmP.h"
34#include "nouveau_drv.h"
35
36static int
37nouveau_debugfs_channel_info(struct seq_file *m, void *data)
38{
39 struct drm_info_node *node = (struct drm_info_node *) m->private;
40 struct nouveau_channel *chan = node->info_ent->data;
41
42 seq_printf(m, "channel id : %d\n", chan->id);
43
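/* dma.{max,cur,put,free} are dword indices; "<< 2" below converts
 * them to byte offsets for display.
 */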
44 seq_printf(m, "cpu fifo state:\n");
45 seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
46 seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
47 seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
48 seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
49 seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
50
51 seq_printf(m, "gpu fifo state:\n");
52 seq_printf(m, " get: 0x%08x\n",
53 nvchan_rd32(chan, chan->user_get));
54 seq_printf(m, " put: 0x%08x\n",
55 nvchan_rd32(chan, chan->user_put));
56
57 seq_printf(m, "last fence : %d\n", chan->fence.sequence);
58 seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
59 return 0;
60}
61
62int
63nouveau_debugfs_channel_init(struct nouveau_channel *chan)
64{
65 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
66 struct drm_minor *minor = chan->dev->primary;
67 int ret;
68
69 if (!dev_priv->debugfs.channel_root) {
70 dev_priv->debugfs.channel_root =
71 debugfs_create_dir("channel", minor->debugfs_root);
72 if (!dev_priv->debugfs.channel_root)
73 return -ENOENT;
74 }
75
76 snprintf(chan->debugfs.name, 32, "%d", chan->id);
77 chan->debugfs.info.name = chan->debugfs.name;
78 chan->debugfs.info.show = nouveau_debugfs_channel_info;
79 chan->debugfs.info.driver_features = 0;
80 chan->debugfs.info.data = chan;
81
82 ret = drm_debugfs_create_files(&chan->debugfs.info, 1,
83 dev_priv->debugfs.channel_root,
84 chan->dev->primary);
85 if (ret == 0)
86 chan->debugfs.active = true;
87 return ret;
88}
89
90void
91nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
92{
93 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
94
95 if (!chan->debugfs.active)
96 return;
97
98 drm_debugfs_remove_files(&chan->debugfs.info, 1, chan->dev->primary);
99 chan->debugfs.active = false;
100
101 if (chan == dev_priv->channel) {
102 debugfs_remove(dev_priv->debugfs.channel_root);
103 dev_priv->debugfs.channel_root = NULL;
104 }
105}
106
107static int
108nouveau_debugfs_chipset_info(struct seq_file *m, void *data)
109{
110 struct drm_info_node *node = (struct drm_info_node *) m->private;
111 struct drm_minor *minor = node->minor;
112 struct drm_device *dev = minor->dev;
113 struct drm_nouveau_private *dev_priv = dev->dev_private;
114 uint32_t ppci_0;
115
116 ppci_0 = nv_rd32(dev, dev_priv->chipset >= 0x40 ? 0x88000 : 0x1800);
117
118 seq_printf(m, "PMC_BOOT_0: 0x%08x\n", nv_rd32(dev, NV03_PMC_BOOT_0));
119 seq_printf(m, "PCI ID : 0x%04x:0x%04x\n",
120 ppci_0 & 0xffff, ppci_0 >> 16);
121 return 0;
122}
123
124static int
125nouveau_debugfs_memory_info(struct seq_file *m, void *data)
126{
127 struct drm_info_node *node = (struct drm_info_node *) m->private;
128 struct drm_minor *minor = node->minor;
129 struct drm_device *dev = minor->dev;
130
131 seq_printf(m, "VRAM total: %dKiB\n",
132 (int)(nouveau_mem_fb_amount(dev) >> 10));
133 return 0;
134}
135
136static struct drm_info_list nouveau_debugfs_list[] = {
137 { "chipset", nouveau_debugfs_chipset_info, 0, NULL },
138 { "memory", nouveau_debugfs_memory_info, 0, NULL },
139};
140#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
141
142int
143nouveau_debugfs_init(struct drm_minor *minor)
144{
145 drm_debugfs_create_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
146 minor->debugfs_root, minor);
147 return 0;
148}
149
150void
151nouveau_debugfs_takedown(struct drm_minor *minor)
152{
153 drm_debugfs_remove_files(nouveau_debugfs_list, NOUVEAU_DEBUGFS_ENTRIES,
154 minor);
155}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
new file mode 100644
index 000000000000..dfc94391d71e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_fb.h"
31#include "nouveau_fbcon.h"
32
33static void
34nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
35{
36 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
37 struct drm_device *dev = drm_fb->dev;
38
39 if (drm_fb->fbdev)
40 nouveau_fbcon_remove(dev, drm_fb);
41
42 if (fb->nvbo) {
43 mutex_lock(&dev->struct_mutex);
44 drm_gem_object_unreference(fb->nvbo->gem);
45 mutex_unlock(&dev->struct_mutex);
46 }
47
48 drm_framebuffer_cleanup(drm_fb);
49 kfree(fb);
50}
51
52static int
53nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
54 struct drm_file *file_priv,
55 unsigned int *handle)
56{
57 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
58
59 return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
60}
61
62static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
63 .destroy = nouveau_user_framebuffer_destroy,
64 .create_handle = nouveau_user_framebuffer_create_handle,
65};
66
67struct drm_framebuffer *
68nouveau_framebuffer_create(struct drm_device *dev, struct nouveau_bo *nvbo,
69 struct drm_mode_fb_cmd *mode_cmd)
70{
71 struct nouveau_framebuffer *fb;
72 int ret;
73
74 fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
75 if (!fb)
76 return NULL;
77
78 ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
79 if (ret) {
80 kfree(fb);
81 return NULL;
82 }
83
84 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
85
86 fb->nvbo = nvbo;
87 return &fb->base;
88}
89
90static struct drm_framebuffer *
91nouveau_user_framebuffer_create(struct drm_device *dev,
92 struct drm_file *file_priv,
93 struct drm_mode_fb_cmd *mode_cmd)
94{
95 struct drm_framebuffer *fb;
96 struct drm_gem_object *gem;
97
98 gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
99 if (!gem)
100 return NULL;
101
102 fb = nouveau_framebuffer_create(dev, nouveau_gem_object(gem), mode_cmd);
103 if (!fb) {
104 drm_gem_object_unreference(gem);
105 return NULL;
106 }
107
108 return fb;
109}
110
111const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
112 .fb_create = nouveau_user_framebuffer_create,
113 .fb_changed = nouveau_fbcon_probe,
114};
115
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 000000000000..703553687b20
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30#include "nouveau_dma.h"
31
32int
33nouveau_dma_init(struct nouveau_channel *chan)
34{
35 struct drm_device *dev = chan->dev;
36 struct drm_nouveau_private *dev_priv = dev->dev_private;
37 struct nouveau_gpuobj *m2mf = NULL;
38 int ret, i;
39
40 /* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
41 ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
42 0x0039 : 0x5039, &m2mf);
43 if (ret)
44 return ret;
45
46 ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
47 if (ret)
48 return ret;
49
50 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
51 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
52 if (ret)
53 return ret;
54
55 /* Map push buffer */
56 ret = nouveau_bo_map(chan->pushbuf_bo);
57 if (ret)
58 return ret;
59
60 /* Map the M2MF notifier object; fbcon uses it under kernel modesetting. */
61 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
62 ret = nouveau_bo_map(chan->notifier_bo);
63 if (ret)
64 return ret;
65 }
66
67 /* Initialise DMA vars */
68 chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
69 chan->dma.put = 0;
70 chan->dma.cur = chan->dma.put;
71 chan->dma.free = chan->dma.max - chan->dma.cur;
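/* dma.max is in dwords; the "- 2" is not explained here; we assume it
 * reserves tail space for the wrap-around jump nouveau_dma_wait()
 * emits.
 */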
72
73 /* Insert NOPS for NOUVEAU_DMA_SKIPS */
74 ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
75 if (ret)
76 return ret;
77
78 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
79 OUT_RING(chan, 0);
80
81 /* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
82 ret = RING_SPACE(chan, 4);
83 if (ret)
84 return ret;
85 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
86 OUT_RING(chan, NvM2MF);
87 BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
88 OUT_RING(chan, NvNotify0);
89
90 /* Sit back and pray the channel works. */
91 FIRE_RING(chan);
92
93 return 0;
94}
95
96void
97OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
98{
99 bool is_iomem;
100 u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);
101 mem = &mem[chan->dma.cur];
102 if (is_iomem)
103 memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
104 else
105 memcpy(mem, data, nr_dwords * 4);
106 chan->dma.cur += nr_dwords;
107}
108
109static inline bool
110READ_GET(struct nouveau_channel *chan, uint32_t *get)
111{
112 uint32_t val;
113
114 val = nvchan_rd32(chan, chan->user_get);
115 if (val < chan->pushbuf_base ||
116 val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
117 /* the value is meaningless to nouveau_dma_wait(), except to
118 * tell whether the GPU has stalled or not
119 */
120 *get = val;
121 return false;
122 }
123
124 *get = (val - chan->pushbuf_base) >> 2;
125 return true;
126}
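/* user_get holds a byte offset in the channel's address space;
 * subtracting pushbuf_base and shifting right by 2 yields a dword
 * index comparable with dma.cur and dma.put.
 */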
127
128int
129nouveau_dma_wait(struct nouveau_channel *chan, int size)
130{
131 uint32_t get, prev_get = 0, cnt = 0;
132 bool get_valid;
133
134 while (chan->dma.free < size) {
135 /* reset the counter as long as GET is still advancing; this
136 * avoids misdetecting a GPU lockup if the GPU happens to
137 * just be processing an operation that takes a long time
138 */
139 get_valid = READ_GET(chan, &get);
140 if (get != prev_get) {
141 prev_get = get;
142 cnt = 0;
143 }
144
145 if ((++cnt & 0xff) == 0) {
146 DRM_UDELAY(1);
147 if (cnt > 100000)
148 return -EBUSY;
149 }
150
151 /* loop until we have a usable GET pointer. the value
152 * we read from the GPU may be outside the main ring if
153 * PFIFO is processing a buffer called from the main ring;
154 * discard such values until something sensible is seen.
155 *
156 * the other case we discard GET is while the GPU is fetching
157 * from the SKIPS area, so the code below doesn't have to deal
158 * with some fun corner cases.
159 */
160 if (!get_valid || get < NOUVEAU_DMA_SKIPS)
161 continue;
162
163 if (get <= chan->dma.cur) {
164 /* engine is fetching behind us, or is completely
165 * idle (GET == PUT) so we have free space up until
166 * the end of the push buffer
167 *
168 * we can only hit that path once per call due to
169 * looping back to the beginning of the push buffer,
170 * we'll hit the fetching-ahead-of-us path from that
171 * point on.
172 *
173 * the *one* exception to that rule is if we read
174 * GET==PUT, in which case the below conditional will
175 * always succeed and break us out of the wait loop.
176 */
177 chan->dma.free = chan->dma.max - chan->dma.cur;
178 if (chan->dma.free >= size)
179 break;
180
181 /* not enough space left at the end of the push buffer,
182 * instruct the GPU to jump back to the start right
183 * after processing the currently pending commands.
184 */
185 OUT_RING(chan, chan->pushbuf_base | 0x20000000);
186 WRITE_PUT(NOUVEAU_DMA_SKIPS);
187
188 /* we're now submitting commands at the start of
189 * the push buffer.
190 */
191 chan->dma.cur =
192 chan->dma.put = NOUVEAU_DMA_SKIPS;
193 }
194
195 /* engine fetching ahead of us, we have space up until the
196 * current GET pointer. the "- 1" is to ensure there's
197 * space left to emit a jump back to the beginning of the
198 * push buffer if we require it. we can never get GET == PUT
199 * here, so this is safe.
200 */
201 chan->dma.free = get - chan->dma.cur - 1;
202 }
203
204 return 0;
205}
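/* Illustrative walk-through (hypothetical numbers): with dma.max ==
 * 1000, dma.cur == 990 and GET at dword 200, free is first set to 10;
 * if the caller needs more, a jump to pushbuf_base is emitted, PUT
 * moves to NOUVEAU_DMA_SKIPS (32) and free becomes 200 - 32 - 1 = 167.
 */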
206
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 000000000000..04e85d8f757e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,157 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_DMA_H__
28#define __NOUVEAU_DMA_H__
29
30#ifndef NOUVEAU_DMA_DEBUG
31#define NOUVEAU_DMA_DEBUG 0
32#endif
33
34/*
35 * There's a hw race condition where you can't jump to your PUT offset;
36 * to avoid it we jump to offset + SKIPS and fill the difference with
37 * NOPs.
38 *
39 * xf86-video-nv configures the DMA fetch size to 32 bytes and uses
40 * a SKIPS value of 8. Assuming the race condition has to do with
41 * writing into the fetch area: since we configure a fetch size of
42 * 128 bytes, we need a larger SKIPS value.
43 */
44#define NOUVEAU_DMA_SKIPS (128 / 4)
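/* i.e. 32 dwords of NOP padding at the start of the push buffer */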
45
46/* Hardcoded object assignments to subchannels (subchannel id). */
47enum {
48 NvSubM2MF = 0,
49 NvSub2D = 1,
50 NvSubCtxSurf2D = 1,
51 NvSubGdiRect = 2,
52 NvSubImageBlit = 3
53};
54
55/* Object handles. */
56enum {
57 NvM2MF = 0x80000001,
58 NvDmaFB = 0x80000002,
59 NvDmaTT = 0x80000003,
60 NvDmaVRAM = 0x80000004,
61 NvDmaGART = 0x80000005,
62 NvNotify0 = 0x80000006,
63 Nv2D = 0x80000007,
64 NvCtxSurf2D = 0x80000008,
65 NvRop = 0x80000009,
66 NvImagePatt = 0x8000000a,
67 NvClipRect = 0x8000000b,
68 NvGdiRect = 0x8000000c,
69 NvImageBlit = 0x8000000d,
70
71 /* G80+ display objects */
72 NvEvoVRAM = 0x01000000,
73 NvEvoFB16 = 0x01000001,
74 NvEvoFB32 = 0x01000002
75};
76
77#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
78#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
79#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
80#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
81#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
82#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
83#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
84#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY 0x00000180
85#define NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE 0x00000184
86#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
87
88#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
89#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
90#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
91#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
92#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
93
94static __must_check inline int
95RING_SPACE(struct nouveau_channel *chan, int size)
96{
97 if (chan->dma.free < size) {
98 int ret;
99
100 ret = nouveau_dma_wait(chan, size);
101 if (ret)
102 return ret;
103 }
104
105 chan->dma.free -= size;
106 return 0;
107}
108
109static inline void
110OUT_RING(struct nouveau_channel *chan, int data)
111{
112 if (NOUVEAU_DMA_DEBUG) {
113 NV_INFO(chan->dev, "Ch%d/0x%08x: 0x%08x\n",
114 chan->id, chan->dma.cur << 2, data);
115 }
116
117 nouveau_bo_wr32(chan->pushbuf_bo, chan->dma.cur++, data);
118}
119
120extern void
121OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords);
122
123static inline void
124BEGIN_RING(struct nouveau_channel *chan, int subc, int mthd, int size)
125{
126 OUT_RING(chan, (subc << 13) | (size << 18) | mthd);
127}
128
129#define WRITE_PUT(val) do { \
130 DRM_MEMORYBARRIER(); \
131 nouveau_bo_rd32(chan->pushbuf_bo, 0); \
132 nvchan_wr32(chan, chan->user_put, ((val) << 2) + chan->pushbuf_base); \
133} while (0)
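/* The barrier orders pushbuf writes before the PUT update; the dummy
 * pushbuf read presumably flushes write-combined writes before the
 * GPU starts fetching.
 */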
134
135static inline void
136FIRE_RING(struct nouveau_channel *chan)
137{
138 if (NOUVEAU_DMA_DEBUG) {
139 NV_INFO(chan->dev, "Ch%d/0x%08x: PUSH!\n",
140 chan->id, chan->dma.cur << 2);
141 }
142
143 if (chan->dma.cur == chan->dma.put)
144 return;
145 chan->accel_done = true;
146
147 WRITE_PUT(chan->dma.cur);
148 chan->dma.put = chan->dma.cur;
149}
150
151static inline void
152WIND_RING(struct nouveau_channel *chan)
153{
154 chan->dma.cur = chan->dma.put;
155}
156
157#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
new file mode 100644
index 000000000000..de61f4640e12
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -0,0 +1,569 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_encoder.h"
29
30static int
31auxch_rd(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
32{
33 struct drm_device *dev = encoder->dev;
34 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
35 struct nouveau_i2c_chan *auxch;
36 int ret;
37
38 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
39 if (!auxch)
40 return -ENODEV;
41
42 ret = nouveau_dp_auxch(auxch, 9, address, buf, size);
43 if (ret)
44 return ret;
45
46 return 0;
47}
48
49static int
50auxch_wr(struct drm_encoder *encoder, int address, uint8_t *buf, int size)
51{
52 struct drm_device *dev = encoder->dev;
53 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
54 struct nouveau_i2c_chan *auxch;
55 int ret;
56
57 auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
58 if (!auxch)
59 return -ENODEV;
60
61 ret = nouveau_dp_auxch(auxch, 8, address, buf, size);
62 return ret;
63}
64
65static int
66nouveau_dp_lane_count_set(struct drm_encoder *encoder, uint8_t cmd)
67{
68 struct drm_device *dev = encoder->dev;
69 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
70 uint32_t tmp;
71 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
72
73 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
74 tmp &= ~(NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED |
75 NV50_SOR_DP_CTRL_LANE_MASK);
76 tmp |= ((1 << (cmd & DP_LANE_COUNT_MASK)) - 1) << 16;
77 if (cmd & DP_LANE_COUNT_ENHANCED_FRAME_EN)
78 tmp |= NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED;
79 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
80
81 return auxch_wr(encoder, DP_LANE_COUNT_SET, &cmd, 1);
82}
83
84static int
85nouveau_dp_link_bw_set(struct drm_encoder *encoder, uint8_t cmd)
86{
87 struct drm_device *dev = encoder->dev;
88 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
89 uint32_t tmp;
90 int reg = 0x614300 + (nv_encoder->or * 0x800);
91
92 tmp = nv_rd32(dev, reg);
93 tmp &= 0xfff3ffff;
94 if (cmd == DP_LINK_BW_2_7)
95 tmp |= 0x00040000;
96 nv_wr32(dev, reg, tmp);
97
98 return auxch_wr(encoder, DP_LINK_BW_SET, &cmd, 1);
99}
100
101static int
102nouveau_dp_link_train_set(struct drm_encoder *encoder, int pattern)
103{
104 struct drm_device *dev = encoder->dev;
105 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
106 uint32_t tmp;
107 uint8_t cmd;
108 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
109 int ret;
110
111 tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
112 tmp &= ~NV50_SOR_DP_CTRL_TRAINING_PATTERN;
113 tmp |= (pattern << 24);
114 nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
115
116 ret = auxch_rd(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
117 if (ret)
118 return ret;
119 cmd &= ~DP_TRAINING_PATTERN_MASK;
120 cmd |= (pattern & DP_TRAINING_PATTERN_MASK);
121 return auxch_wr(encoder, DP_TRAINING_PATTERN_SET, &cmd, 1);
122}
123
124static int
125nouveau_dp_max_voltage_swing(struct drm_encoder *encoder)
126{
127 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
128 struct drm_device *dev = encoder->dev;
129 struct bit_displayport_encoder_table_entry *dpse;
130 struct bit_displayport_encoder_table *dpe;
131 int i, dpe_headerlen, max_vs = 0;
132
133 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
134 if (!dpe)
135 return false;
136 dpse = (void *)((char *)dpe + dpe_headerlen);
137
138 for (i = 0; i < dpe->record_nr; i++, dpse++) {
139 if (dpse->vs_level > max_vs)
140 max_vs = dpse->vs_level;
141 }
142
143 return max_vs;
144}
145
146static int
147nouveau_dp_max_pre_emphasis(struct drm_encoder *encoder, int vs)
148{
149 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
150 struct drm_device *dev = encoder->dev;
151 struct bit_displayport_encoder_table_entry *dpse;
152 struct bit_displayport_encoder_table *dpe;
153 int i, dpe_headerlen, max_pre = 0;
154
155 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
156 if (!dpe)
157 return false;
158 dpse = (void *)((char *)dpe + dpe_headerlen);
159
160 for (i = 0; i < dpe->record_nr; i++, dpse++) {
161 if (dpse->vs_level != vs)
162 continue;
163
164 if (dpse->pre_level > max_pre)
165 max_pre = dpse->pre_level;
166 }
167
168 return max_pre;
169}
170
171static bool
172nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
173{
174 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
175 struct drm_device *dev = encoder->dev;
176 struct bit_displayport_encoder_table_entry *dpse;
177 struct bit_displayport_encoder_table *dpe;
178 int ret, i, dpe_headerlen, vs = 0, pre = 0;
179 uint8_t request[2];
180
181 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
182 if (!dpe)
183 return false;
184 dpse = (void *)((char *)dpe + dpe_headerlen);
185
186 ret = auxch_rd(encoder, DP_ADJUST_REQUEST_LANE0_1, request, 2);
187 if (ret)
188 return false;
189
190 NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
191
192 /* Keep all lanes at the same level. */
193 for (i = 0; i < nv_encoder->dp.link_nr; i++) {
194 int lane_req = (request[i >> 1] >> ((i & 1) << 2)) & 0xf;
195 int lane_vs = lane_req & 3;
196 int lane_pre = (lane_req >> 2) & 3;
197
198 if (lane_vs > vs)
199 vs = lane_vs;
200 if (lane_pre > pre)
201 pre = lane_pre;
202 }
203
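 /* ORing in 4 sets the MAX_SWING/MAX_PRE_EMPHASIS_REACHED flag, which
  * ends up in bits 2 and 5 of the TRAINING_LANEx_SET values built below. */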
204 if (vs >= nouveau_dp_max_voltage_swing(encoder)) {
205 vs = nouveau_dp_max_voltage_swing(encoder);
206 vs |= 4;
207 }
208
209 if (pre >= nouveau_dp_max_pre_emphasis(encoder, vs & 3)) {
210 pre = nouveau_dp_max_pre_emphasis(encoder, vs & 3);
211 pre |= 4;
212 }
213
214 /* Update the configuration for all lanes. */
215 for (i = 0; i < nv_encoder->dp.link_nr; i++)
216 config[i] = (pre << 3) | vs;
217
218 return true;
219}
220
221static bool
222nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
223{
224 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
225 struct drm_device *dev = encoder->dev;
226 struct bit_displayport_encoder_table_entry *dpse;
227 struct bit_displayport_encoder_table *dpe;
228 int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
229 int dpe_headerlen, ret, i;
230
231 NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
232 config[0], config[1], config[2], config[3]);
233
234 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
235 if (!dpe)
236 return false;
237 dpse = (void *)((char *)dpe + dpe_headerlen);
238
239 for (i = 0; i < dpe->record_nr; i++, dpse++) {
240 if (dpse->vs_level == (config[0] & 3) &&
241 dpse->pre_level == ((config[0] >> 3) & 3))
242 break;
243 }
244 BUG_ON(i == dpe->record_nr);
245
246 for (i = 0; i < nv_encoder->dp.link_nr; i++) {
247 const int shift[4] = { 16, 8, 0, 24 };
248 uint32_t mask = 0xff << shift[i];
249 uint32_t reg0, reg1, reg2;
250
251 reg0 = nv_rd32(dev, NV50_SOR_DP_UNK118(or, link)) & ~mask;
252 reg0 |= (dpse->reg0 << shift[i]);
253 reg1 = nv_rd32(dev, NV50_SOR_DP_UNK120(or, link)) & ~mask;
254 reg1 |= (dpse->reg1 << shift[i]);
255 reg2 = nv_rd32(dev, NV50_SOR_DP_UNK130(or, link)) & 0xffff00ff;
256 reg2 |= (dpse->reg2 << 8);
257 nv_wr32(dev, NV50_SOR_DP_UNK118(or, link), reg0);
258 nv_wr32(dev, NV50_SOR_DP_UNK120(or, link), reg1);
259 nv_wr32(dev, NV50_SOR_DP_UNK130(or, link), reg2);
260 }
261
262 ret = auxch_wr(encoder, DP_TRAINING_LANE0_SET, config, 4);
263 if (ret)
264 return false;
265
266 return true;
267}
268
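/*
 * Standard DisplayPort link training: program link bandwidth and lane
 * count, run clock recovery with training pattern 1, then channel
 * equalisation with training pattern 2.  On failure, fall back to the
 * 1.62Gbps rate and start over.
 */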
269bool
270nouveau_dp_link_train(struct drm_encoder *encoder)
271{
272 struct drm_device *dev = encoder->dev;
273 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
274 uint8_t config[4];
275 uint8_t status[3];
276 bool cr_done, cr_max_vs, eq_done;
277 int ret = 0, i, tries, voltage;
278
279 NV_DEBUG(dev, "link training!!\n");
280train:
281 cr_done = eq_done = false;
282
283 /* set link configuration */
284 NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
285 nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
286
287 ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
288 if (ret)
289 return false;
290
291 config[0] = nv_encoder->dp.link_nr;
292 if (nv_encoder->dp.dpcd_version >= 0x11)
293 config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
294
295 ret = nouveau_dp_lane_count_set(encoder, config[0]);
296 if (ret)
297 return false;
298
299 /* clock recovery */
300 NV_DEBUG(dev, "\tbegin cr\n");
301 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
302 if (ret)
303 goto stop;
304
305 tries = 0;
306 voltage = -1;
307 memset(config, 0x00, sizeof(config));
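 /* Clock recovery: retrain with adjusted drive settings until every lane
  * reports CR_DONE, giving up after five tries at any single voltage
  * swing level or once the encoder can't raise the drive settings
  * further. */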
308 for (;;) {
309 if (!nouveau_dp_link_train_commit(encoder, config))
310 break;
311
312 udelay(100);
313
314 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
315 if (ret)
316 break;
317 NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
318 status[0], status[1]);
319
320 cr_done = true;
321 cr_max_vs = false;
322 for (i = 0; i < nv_encoder->dp.link_nr; i++) {
323 int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
324
325 if (!(lane & DP_LANE_CR_DONE)) {
326 cr_done = false;
327 if (config[i] & DP_TRAIN_MAX_SWING_REACHED)
328 cr_max_vs = true;
329 break;
330 }
331 }
332
333 if ((config[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
334 voltage = config[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
335 tries = 0;
336 }
337
338 if (cr_done || cr_max_vs || (++tries == 5))
339 break;
340
341 if (!nouveau_dp_link_train_adjust(encoder, config))
342 break;
343 }
344
345 if (!cr_done)
346 goto stop;
347
348 /* channel equalisation */
349 NV_DEBUG(dev, "\tbegin eq\n");
350 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
351 if (ret)
352 goto stop;
353
354 for (tries = 0; tries <= 5; tries++) {
355 udelay(400);
356
357 ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
358 if (ret)
359 break;
360 NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
361 status[0], status[1]);
362
363 eq_done = true;
364 if (!(status[2] & DP_INTERLANE_ALIGN_DONE))
365 eq_done = false;
366
367 for (i = 0; eq_done && i < nv_encoder->dp.link_nr; i++) {
368 int lane = (status[i >> 1] >> ((i & 1) * 4)) & 0xf;
369
370 if (!(lane & DP_LANE_CR_DONE)) {
371 cr_done = false;
372 break;
373 }
374
375 if (!(lane & DP_LANE_CHANNEL_EQ_DONE) ||
376 !(lane & DP_LANE_SYMBOL_LOCKED)) {
377 eq_done = false;
378 break;
379 }
380 }
381
382 if (eq_done || !cr_done)
383 break;
384
385 if (!nouveau_dp_link_train_adjust(encoder, config) ||
386 !nouveau_dp_link_train_commit(encoder, config))
387 break;
388 }
389
390stop:
391 /* end link training */
392 ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_DISABLE);
393 if (ret)
394 return false;
395
396 /* retry at a lower setting, if possible */
397 if (!ret && !(eq_done && cr_done)) {
398 NV_DEBUG(dev, "\twe failed\n");
399 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
400 NV_DEBUG(dev, "retry link training at low rate\n");
401 nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
402 goto train;
403 }
404 }
405
406 return eq_done;
407}
408
409bool
410nouveau_dp_detect(struct drm_encoder *encoder)
411{
412 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
413 struct drm_device *dev = encoder->dev;
414 uint8_t dpcd[4];
415 int ret;
416
417 ret = auxch_rd(encoder, 0x0000, dpcd, 4);
418 if (ret)
419 return false;
420
421 NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
422 "display: link_bw %d, link_nr %d version 0x%02x\n",
423 nv_encoder->dcb->dpconf.link_bw,
424 nv_encoder->dcb->dpconf.link_nr,
425 dpcd[1], dpcd[2] & 0x0f, dpcd[0]);
426
427 nv_encoder->dp.dpcd_version = dpcd[0];
428
429 nv_encoder->dp.link_bw = dpcd[1];
430 if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62 &&
431 !nv_encoder->dcb->dpconf.link_bw)
432 nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
433
434 nv_encoder->dp.link_nr = dpcd[2] & 0xf;
435 if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
436 nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
437
438 return true;
439}
440
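/*
 * Perform one transaction on an NV50 AUX channel.  data_nr is limited to
 * the 16 bytes of the data32[] staging buffers.  Returns a negative errno
 * on failure, otherwise the REPLY field of the channel status register.
 */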
441int
442nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
443 uint8_t *data, int data_nr)
444{
445 struct drm_device *dev = auxch->dev;
446 uint32_t tmp, ctrl, stat = 0, data32[4] = {};
447 int ret = 0, i, index = auxch->rd;
448
449 NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
450
451 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
452 nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
453 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
454 if (!(tmp & 0x01000000)) {
455 NV_ERROR(dev, "expected bit 24 == 1, got 0x%08x\n", tmp);
456 ret = -EIO;
457 goto out;
458 }
459
460 for (i = 0; i < 3; i++) {
461 tmp = nv_rd32(dev, NV50_AUXCH_STAT(auxch->rd));
462 if (tmp & NV50_AUXCH_STAT_STATE_READY)
463 break;
464 udelay(100);
465 }
466
467 if (i == 3) {
468 ret = -EBUSY;
469 goto out;
470 }
471
472 if (!(cmd & 1)) {
473 memcpy(data32, data, data_nr);
474 for (i = 0; i < 4; i++) {
475 NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
476 nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
477 }
478 }
479
480 nv_wr32(dev, NV50_AUXCH_ADDR(index), addr);
481 ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
482 ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
483 ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT);
484 ctrl |= ((data_nr - 1) << NV50_AUXCH_CTRL_LEN_SHIFT);
485
486 for (;;) {
487 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
488 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
489 nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
490 if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
491 NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
492 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
493 return -EBUSY;
494 }
495
496 udelay(400);
497
498 stat = nv_rd32(dev, NV50_AUXCH_STAT(index));
499 if ((stat & NV50_AUXCH_STAT_REPLY_AUX) !=
500 NV50_AUXCH_STAT_REPLY_AUX_DEFER)
501 break;
502 }
503
504 if (cmd & 1) {
505 for (i = 0; i < 4; i++) {
506 data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
507 NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
508 }
509 memcpy(data, data32, data_nr);
510 }
511
512out:
513 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
514 nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp & ~0x00100000);
515 tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
516 if (tmp & 0x01000000) {
517 NV_ERROR(dev, "expected bit 24 == 0, got 0x%08x\n", tmp);
518 ret = -EIO;
519 }
520
521 udelay(400);
522
523 return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
524}
525
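/*
 * I2C-over-AUX adapter hook.  The MOT (middle-of-transaction) bit is held
 * for every byte until the caller signals STOP, and DEFER replies from
 * the sink are retried indefinitely.
 */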
526int
527nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
528 uint8_t write_byte, uint8_t *read_byte)
529{
530 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
531 struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
532 struct drm_device *dev = auxch->dev;
533 int ret = 0, cmd, addr = algo_data->address;
534 uint8_t *buf;
535
536 if (mode == MODE_I2C_READ) {
537 cmd = AUX_I2C_READ;
538 buf = read_byte;
539 } else {
540 cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
541 buf = &write_byte;
542 }
543
544 if (!(mode & MODE_I2C_STOP))
545 cmd |= AUX_I2C_MOT;
546
547 if (mode & MODE_I2C_START)
548 return 1;
549
550 for (;;) {
551 ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
552 if (ret < 0)
553 return ret;
554
555 switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
556 case NV50_AUXCH_STAT_REPLY_I2C_ACK:
557 return 1;
558 case NV50_AUXCH_STAT_REPLY_I2C_NACK:
559 return -EREMOTEIO;
560 case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
561 udelay(100);
562 break;
563 default:
564 NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
565 return -EREMOTEIO;
566 }
567 }
568}
569
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 000000000000..35249c35118f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,405 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#include <linux/console.h>
26
27#include "drmP.h"
28#include "drm.h"
29#include "drm_crtc_helper.h"
30#include "nouveau_drv.h"
31#include "nouveau_hw.h"
32#include "nouveau_fb.h"
33#include "nouveau_fbcon.h"
34#include "nv50_display.h"
35
36#include "drm_pciids.h"
37
38MODULE_PARM_DESC(noagp, "Disable AGP");
39int nouveau_noagp;
40module_param_named(noagp, nouveau_noagp, int, 0400);
41
42MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
43static int nouveau_modeset = -1; /* kms */
44module_param_named(modeset, nouveau_modeset, int, 0400);
45
46MODULE_PARM_DESC(vbios, "Override default VBIOS location");
47char *nouveau_vbios;
48module_param_named(vbios, nouveau_vbios, charp, 0400);
49
50MODULE_PARM_DESC(vram_pushbuf, "Force DMA push buffers to be in VRAM");
51int nouveau_vram_pushbuf;
52module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
53
54MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
55int nouveau_vram_notify;
56module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
57
58MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
59int nouveau_duallink = 1;
60module_param_named(duallink, nouveau_duallink, int, 0400);
61
62MODULE_PARM_DESC(uscript_lvds, "LVDS output script table ID (>=GeForce 8)");
63int nouveau_uscript_lvds = -1;
64module_param_named(uscript_lvds, nouveau_uscript_lvds, int, 0400);
65
66MODULE_PARM_DESC(uscript_tmds, "TMDS output script table ID (>=GeForce 8)");
67int nouveau_uscript_tmds = -1;
68module_param_named(uscript_tmds, nouveau_uscript_tmds, int, 0400);
69
70MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
71 "\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
72 "\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
73 "\t\tDefault: PAL\n"
74 "\t\t*NOTE* Ignored for cards with external TV encoders.");
75char *nouveau_tv_norm;
76module_param_named(tv_norm, nouveau_tv_norm, charp, 0400);
77
78MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
79 "\t\t0x1 mc, 0x2 video, 0x4 fb, 0x8 extdev,\n"
80 "\t\t0x10 crtc, 0x20 ramdac, 0x40 vgacrtc, 0x80 rmvio,\n"
81 "\t\t0x100 vgaattr, 0x200 EVO (G80+). ");
82int nouveau_reg_debug;
83module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
84
85int nouveau_fbpercrtc;
86#if 0
87module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
88#endif
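/* Example (hypothetical values): load with kernel modesetting forced on
 * and PMC + video register tracing enabled:
 *	modprobe nouveau modeset=1 reg_debug=0x3
 */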
89
90static struct pci_device_id pciidlist[] = {
91 {
92 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
93 .class = PCI_BASE_CLASS_DISPLAY << 16,
94 .class_mask = 0xff << 16,
95 },
96 {
97 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
98 .class = PCI_BASE_CLASS_DISPLAY << 16,
99 .class_mask = 0xff << 16,
100 },
101 {}
102};
103
104MODULE_DEVICE_TABLE(pci, pciidlist);
105
106static struct drm_driver driver;
107
108static int __devinit
109nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
110{
111 return drm_get_dev(pdev, ent, &driver);
112}
113
114static void
115nouveau_pci_remove(struct pci_dev *pdev)
116{
117 struct drm_device *dev = pci_get_drvdata(pdev);
118
119 drm_put_dev(dev);
120}
121
122static int
123nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
124{
125 struct drm_device *dev = pci_get_drvdata(pdev);
126 struct drm_nouveau_private *dev_priv = dev->dev_private;
127 struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
128 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
129 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
130 struct nouveau_channel *chan;
131 struct drm_crtc *crtc;
132 uint32_t fbdev_flags;
133 int ret, i;
134
135 if (!drm_core_check_feature(dev, DRIVER_MODESET))
136 return -ENODEV;
137
138 if (pm_state.event == PM_EVENT_PRETHAW)
139 return 0;
140
141 fbdev_flags = dev_priv->fbdev_info->flags;
142 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
143
144 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
145 struct nouveau_framebuffer *nouveau_fb;
146
147 nouveau_fb = nouveau_framebuffer(crtc->fb);
148 if (!nouveau_fb || !nouveau_fb->nvbo)
149 continue;
150
151 nouveau_bo_unpin(nouveau_fb->nvbo);
152 }
153
154 NV_INFO(dev, "Evicting buffers...\n");
155 ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
156
157 NV_INFO(dev, "Idling channels...\n");
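 /* Idle each channel by emitting a fence and waiting on it; channel 0 is
  * skipped on NV50 and later cards. */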
158 for (i = 0; i < pfifo->channels; i++) {
159 struct nouveau_fence *fence = NULL;
160
161 chan = dev_priv->fifos[i];
162 if (!chan || (dev_priv->card_type >= NV_50 &&
163 chan == dev_priv->fifos[0]))
164 continue;
165
166 ret = nouveau_fence_new(chan, &fence, true);
167 if (ret == 0) {
168 ret = nouveau_fence_wait(fence, NULL, false, false);
169 nouveau_fence_unref((void *)&fence);
170 }
171
172 if (ret) {
173 NV_ERROR(dev, "Failed to idle channel %d for suspend\n",
174 chan->id);
175 }
176 }
177
178 pgraph->fifo_access(dev, false);
179 nouveau_wait_for_idle(dev);
180 pfifo->reassign(dev, false);
181 pfifo->disable(dev);
182 pfifo->unload_context(dev);
183 pgraph->unload_context(dev);
184
185 NV_INFO(dev, "Suspending GPU objects...\n");
186 ret = nouveau_gpuobj_suspend(dev);
187 if (ret) {
188 NV_ERROR(dev, "... failed: %d\n", ret);
189 goto out_abort;
190 }
191
192 ret = pinstmem->suspend(dev);
193 if (ret) {
194 NV_ERROR(dev, "... failed: %d\n", ret);
195 nouveau_gpuobj_suspend_cleanup(dev);
196 goto out_abort;
197 }
198
199 NV_INFO(dev, "And we're gone!\n");
200 pci_save_state(pdev);
201 if (pm_state.event == PM_EVENT_SUSPEND) {
202 pci_disable_device(pdev);
203 pci_set_power_state(pdev, PCI_D3hot);
204 }
205
206 acquire_console_sem();
207 fb_set_suspend(dev_priv->fbdev_info, 1);
208 release_console_sem();
209 dev_priv->fbdev_info->flags = fbdev_flags;
210 return 0;
211
212out_abort:
213 NV_INFO(dev, "Re-enabling acceleration...\n");
214 pfifo->enable(dev);
215 pfifo->reassign(dev, true);
216 pgraph->fifo_access(dev, true);
217 return ret;
218}
219
220static int
221nouveau_pci_resume(struct pci_dev *pdev)
222{
223 struct drm_device *dev = pci_get_drvdata(pdev);
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_engine *engine = &dev_priv->engine;
226 struct drm_crtc *crtc;
227 uint32_t fbdev_flags;
228 int ret, i;
229
230 if (!drm_core_check_feature(dev, DRIVER_MODESET))
231 return -ENODEV;
232
233 fbdev_flags = dev_priv->fbdev_info->flags;
234 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
235
236 NV_INFO(dev, "We're back, enabling device...\n");
237 pci_set_power_state(pdev, PCI_D0);
238 pci_restore_state(pdev);
239 if (pci_enable_device(pdev))
240 return -1;
241 pci_set_master(dev->pdev);
242
243 NV_INFO(dev, "POSTing device...\n");
244 ret = nouveau_run_vbios_init(dev);
245 if (ret)
246 return ret;
247
248 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
249 ret = nouveau_mem_init_agp(dev);
250 if (ret) {
251 NV_ERROR(dev, "error reinitialising AGP: %d\n", ret);
252 return ret;
253 }
254 }
255
256 NV_INFO(dev, "Reinitialising engines...\n");
257 engine->instmem.resume(dev);
258 engine->mc.init(dev);
259 engine->timer.init(dev);
260 engine->fb.init(dev);
261 engine->graph.init(dev);
262 engine->fifo.init(dev);
263
264 NV_INFO(dev, "Restoring GPU objects...\n");
265 nouveau_gpuobj_resume(dev);
266
267 nouveau_irq_postinstall(dev);
268
269 /* Re-write SKIPS, they'll have been lost over the suspend */
270 if (nouveau_vram_pushbuf) {
271 struct nouveau_channel *chan;
272 int j;
273
274 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
275 chan = dev_priv->fifos[i];
276 if (!chan)
277 continue;
278
279 for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
280 nouveau_bo_wr32(chan->pushbuf_bo, j, 0);
281 }
282 }
283
284 NV_INFO(dev, "Restoring mode...\n");
285 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
286 struct nouveau_framebuffer *nouveau_fb;
287
288 nouveau_fb = nouveau_framebuffer(crtc->fb);
289 if (!nouveau_fb || !nouveau_fb->nvbo)
290 continue;
291
292 nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
293 }
294
295 if (dev_priv->card_type < NV_50) {
296 nv04_display_restore(dev);
297 NVLockVgaCrtcs(dev, false);
298 } else
299 nv50_display_init(dev);
300
301 /* Force CLUT to get re-loaded during modeset */
302 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
303 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
304
305 nv_crtc->lut.depth = 0;
306 }
307
308 acquire_console_sem();
309 fb_set_suspend(dev_priv->fbdev_info, 0);
310 release_console_sem();
311
312 nouveau_fbcon_zfill(dev);
313
314 drm_helper_resume_force_mode(dev);
315 dev_priv->fbdev_info->flags = fbdev_flags;
316 return 0;
317}
318
319static struct drm_driver driver = {
320 .driver_features =
321 DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
322 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
323 .load = nouveau_load,
324 .firstopen = nouveau_firstopen,
325 .lastclose = nouveau_lastclose,
326 .unload = nouveau_unload,
327 .preclose = nouveau_preclose,
328#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
329 .debugfs_init = nouveau_debugfs_init,
330 .debugfs_cleanup = nouveau_debugfs_takedown,
331#endif
332 .irq_preinstall = nouveau_irq_preinstall,
333 .irq_postinstall = nouveau_irq_postinstall,
334 .irq_uninstall = nouveau_irq_uninstall,
335 .irq_handler = nouveau_irq_handler,
336 .reclaim_buffers = drm_core_reclaim_buffers,
337 .get_map_ofs = drm_core_get_map_ofs,
338 .get_reg_ofs = drm_core_get_reg_ofs,
339 .ioctls = nouveau_ioctls,
340 .fops = {
341 .owner = THIS_MODULE,
342 .open = drm_open,
343 .release = drm_release,
344 .ioctl = drm_ioctl,
345 .mmap = nouveau_ttm_mmap,
346 .poll = drm_poll,
347 .fasync = drm_fasync,
348#if defined(CONFIG_COMPAT)
349 .compat_ioctl = nouveau_compat_ioctl,
350#endif
351 },
352 .pci_driver = {
353 .name = DRIVER_NAME,
354 .id_table = pciidlist,
355 .probe = nouveau_pci_probe,
356 .remove = nouveau_pci_remove,
357 .suspend = nouveau_pci_suspend,
358 .resume = nouveau_pci_resume
359 },
360
361 .gem_init_object = nouveau_gem_object_new,
362 .gem_free_object = nouveau_gem_object_del,
363
364 .name = DRIVER_NAME,
365 .desc = DRIVER_DESC,
366#ifdef GIT_REVISION
367 .date = GIT_REVISION,
368#else
369 .date = DRIVER_DATE,
370#endif
371 .major = DRIVER_MAJOR,
372 .minor = DRIVER_MINOR,
373 .patchlevel = DRIVER_PATCHLEVEL,
374};
375
376static int __init nouveau_init(void)
377{
378 driver.num_ioctls = nouveau_max_ioctl;
379
380 if (nouveau_modeset == -1) {
381#ifdef CONFIG_VGA_CONSOLE
382 if (vgacon_text_force())
383 nouveau_modeset = 0;
384 else
385#endif
386 nouveau_modeset = 1;
387 }
388
389 if (nouveau_modeset == 1)
390 driver.driver_features |= DRIVER_MODESET;
391
392 return drm_init(&driver);
393}
394
395static void __exit nouveau_exit(void)
396{
397 drm_exit(&driver);
398}
399
400module_init(nouveau_init);
401module_exit(nouveau_exit);
402
403MODULE_AUTHOR(DRIVER_AUTHOR);
404MODULE_DESCRIPTION(DRIVER_DESC);
405MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 000000000000..88b4c7b77e7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,1286 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRV_H__
26#define __NOUVEAU_DRV_H__
27
28#define DRIVER_AUTHOR "Stephane Marchesin"
29#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
30
31#define DRIVER_NAME "nouveau"
32#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
33#define DRIVER_DATE "20090420"
34
35#define DRIVER_MAJOR 0
36#define DRIVER_MINOR 0
37#define DRIVER_PATCHLEVEL 15
38
39#define NOUVEAU_FAMILY 0x0000FFFF
40#define NOUVEAU_FLAGS 0xFFFF0000
41
42#include "ttm/ttm_bo_api.h"
43#include "ttm/ttm_bo_driver.h"
44#include "ttm/ttm_placement.h"
45#include "ttm/ttm_memory.h"
46#include "ttm/ttm_module.h"
47
48struct nouveau_fpriv {
49 struct ttm_object_file *tfile;
50};
51
52#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
53
54#include "nouveau_drm.h"
55#include "nouveau_reg.h"
56#include "nouveau_bios.h"
57
58#define MAX_NUM_DCB_ENTRIES 16
59
60#define NOUVEAU_MAX_CHANNEL_NR 128
61
62#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
63#define NV50_VM_BLOCK (512*1024*1024ULL)
64#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
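/* i.e. page tables for up to four 512MiB blocks of VRAM */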
65
66struct nouveau_bo {
67 struct ttm_buffer_object bo;
68 struct ttm_placement placement;
69 u32 placements[3];
70 struct ttm_bo_kmap_obj kmap;
71 struct list_head head;
72
73 /* protected by ttm_bo_reserve() */
74 struct drm_file *reserved_by;
75 struct list_head entry;
76 int pbbo_index;
77
78 struct nouveau_channel *channel;
79
80 bool mappable;
81 bool no_vm;
82
83 uint32_t tile_mode;
84 uint32_t tile_flags;
85
86 struct drm_gem_object *gem;
87 struct drm_file *cpu_filp;
88 int pin_refcnt;
89};
90
91static inline struct nouveau_bo *
92nouveau_bo(struct ttm_buffer_object *bo)
93{
94 return container_of(bo, struct nouveau_bo, bo);
95}
96
97static inline struct nouveau_bo *
98nouveau_gem_object(struct drm_gem_object *gem)
99{
100 return gem ? gem->driver_private : NULL;
101}
102
103/* TODO: submit equivalent to TTM generic API upstream? */
104static inline void __iomem *
105nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
106{
107 bool is_iomem;
108 void __iomem *ioptr = (void __force __iomem *)ttm_kmap_obj_virtual(
109 &nvbo->kmap, &is_iomem);
110 WARN_ON_ONCE(ioptr && !is_iomem);
111 return ioptr;
112}
113
114struct mem_block {
115 struct mem_block *next;
116 struct mem_block *prev;
117 uint64_t start;
118 uint64_t size;
119 struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
120};
121
122enum nouveau_flags {
123 NV_NFORCE = 0x10000000,
124 NV_NFORCE2 = 0x20000000
125};
126
127#define NVOBJ_ENGINE_SW 0
128#define NVOBJ_ENGINE_GR 1
129#define NVOBJ_ENGINE_DISPLAY 2
130#define NVOBJ_ENGINE_INT 0xdeadbeef
131
132#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
133#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
134#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
135#define NVOBJ_FLAG_FAKE (1 << 3)
136struct nouveau_gpuobj {
137 struct list_head list;
138
139 struct nouveau_channel *im_channel;
140 struct mem_block *im_pramin;
141 struct nouveau_bo *im_backing;
142 uint32_t im_backing_start;
143 uint32_t *im_backing_suspend;
144 int im_bound;
145
146 uint32_t flags;
147 int refcount;
148
149 uint32_t engine;
150 uint32_t class;
151
152 void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
153 void *priv;
154};
155
156struct nouveau_gpuobj_ref {
157 struct list_head list;
158
159 struct nouveau_gpuobj *gpuobj;
160 uint32_t instance;
161
162 struct nouveau_channel *channel;
163 int handle;
164};
165
166struct nouveau_channel {
167 struct drm_device *dev;
168 int id;
169
170 /* owner of this fifo */
171 struct drm_file *file_priv;
172 /* mapping of the fifo itself */
173 struct drm_local_map *map;
174
175 /* mapping of the regs controlling the fifo */
176 void __iomem *user;
177 uint32_t user_get;
178 uint32_t user_put;
179
180 /* Fencing */
181 struct {
182 /* lock protects the pending list only */
183 spinlock_t lock;
184 struct list_head pending;
185 uint32_t sequence;
186 uint32_t sequence_ack;
187 uint32_t last_sequence_irq;
188 } fence;
189
190 /* DMA push buffer */
191 struct nouveau_gpuobj_ref *pushbuf;
192 struct nouveau_bo *pushbuf_bo;
193 uint32_t pushbuf_base;
194
195 /* Notifier memory */
196 struct nouveau_bo *notifier_bo;
197 struct mem_block *notifier_heap;
198
199 /* PFIFO context */
200 struct nouveau_gpuobj_ref *ramfc;
201 struct nouveau_gpuobj_ref *cache;
202
203 /* PGRAPH context */
204 /* XXX maybe merge the two pointers as private data? */
205 struct nouveau_gpuobj_ref *ramin_grctx;
206 void *pgraph_ctx;
207
208 /* NV50 VM */
209 struct nouveau_gpuobj *vm_pd;
210 struct nouveau_gpuobj_ref *vm_gart_pt;
211 struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
212
213 /* Objects */
214 struct nouveau_gpuobj_ref *ramin; /* Private instmem */
215 struct mem_block *ramin_heap; /* Private PRAMIN heap */
216 struct nouveau_gpuobj_ref *ramht; /* Hash table */
217 struct list_head ramht_refs; /* Objects referenced by RAMHT */
218
219 /* GPU object info for stuff used in-kernel (mm_enabled) */
220 uint32_t m2mf_ntfy;
221 uint32_t vram_handle;
222 uint32_t gart_handle;
223 bool accel_done;
224
225 /* Push buffer state (only for drm's channel on !mm_enabled) */
226 struct {
227 int max;
228 int free;
229 int cur;
230 int put;
231 /* access via pushbuf_bo */
232 } dma;
233
234 uint32_t sw_subchannel[8];
235
236 struct {
237 struct nouveau_gpuobj *vblsem;
238 uint32_t vblsem_offset;
239 uint32_t vblsem_rval;
240 struct list_head vbl_wait;
241 } nvsw;
242
243 struct {
244 bool active;
245 char name[32];
246 struct drm_info_list info;
247 } debugfs;
248};
249
250struct nouveau_instmem_engine {
251 void *priv;
252
253 int (*init)(struct drm_device *dev);
254 void (*takedown)(struct drm_device *dev);
255 int (*suspend)(struct drm_device *dev);
256 void (*resume)(struct drm_device *dev);
257
258 int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
259 uint32_t *size);
260 void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
261 int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
262 int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
263 void (*prepare_access)(struct drm_device *, bool write);
264 void (*finish_access)(struct drm_device *);
265};
266
267struct nouveau_mc_engine {
268 int (*init)(struct drm_device *dev);
269 void (*takedown)(struct drm_device *dev);
270};
271
272struct nouveau_timer_engine {
273 int (*init)(struct drm_device *dev);
274 void (*takedown)(struct drm_device *dev);
275 uint64_t (*read)(struct drm_device *dev);
276};
277
278struct nouveau_fb_engine {
279 int (*init)(struct drm_device *dev);
280 void (*takedown)(struct drm_device *dev);
281};
282
283struct nouveau_fifo_engine {
284 void *priv;
285
286 int channels;
287
288 int (*init)(struct drm_device *);
289 void (*takedown)(struct drm_device *);
290
291 void (*disable)(struct drm_device *);
292 void (*enable)(struct drm_device *);
293 bool (*reassign)(struct drm_device *, bool enable);
294
295 int (*channel_id)(struct drm_device *);
296
297 int (*create_context)(struct nouveau_channel *);
298 void (*destroy_context)(struct nouveau_channel *);
299 int (*load_context)(struct nouveau_channel *);
300 int (*unload_context)(struct drm_device *);
301};
302
303struct nouveau_pgraph_object_method {
304 int id;
305 int (*exec)(struct nouveau_channel *chan, int grclass, int mthd,
306 uint32_t data);
307};
308
309struct nouveau_pgraph_object_class {
310 int id;
311 bool software;
312 struct nouveau_pgraph_object_method *methods;
313};
314
315struct nouveau_pgraph_engine {
316 struct nouveau_pgraph_object_class *grclass;
317 bool accel_blocked;
318 void *ctxprog;
319 void *ctxvals;
320
321 int (*init)(struct drm_device *);
322 void (*takedown)(struct drm_device *);
323
324 void (*fifo_access)(struct drm_device *, bool);
325
326 struct nouveau_channel *(*channel)(struct drm_device *);
327 int (*create_context)(struct nouveau_channel *);
328 void (*destroy_context)(struct nouveau_channel *);
329 int (*load_context)(struct nouveau_channel *);
330 int (*unload_context)(struct drm_device *);
331};
332
333struct nouveau_engine {
334 struct nouveau_instmem_engine instmem;
335 struct nouveau_mc_engine mc;
336 struct nouveau_timer_engine timer;
337 struct nouveau_fb_engine fb;
338 struct nouveau_pgraph_engine graph;
339 struct nouveau_fifo_engine fifo;
340};
341
342struct nouveau_pll_vals {
343 union {
344 struct {
345#ifdef __BIG_ENDIAN
346 uint8_t N1, M1, N2, M2;
347#else
348 uint8_t M1, N1, M2, N2;
349#endif
350 };
351 struct {
352 uint16_t NM1, NM2;
353 } __attribute__((packed));
354 };
355 int log2P;
356
357 int refclk;
358};
359
360enum nv04_fp_display_regs {
361 FP_DISPLAY_END,
362 FP_TOTAL,
363 FP_CRTC,
364 FP_SYNC_START,
365 FP_SYNC_END,
366 FP_VALID_START,
367 FP_VALID_END
368};
369
370struct nv04_crtc_reg {
371 unsigned char MiscOutReg;
372 uint8_t CRTC[0x9f];
373 uint8_t CR58[0x10];
374 uint8_t Sequencer[5];
375 uint8_t Graphics[9];
376 uint8_t Attribute[21];
377 unsigned char DAC[768]; /* internal color lookup table */
378
379 /* PCRTC regs */
380 uint32_t fb_start;
381 uint32_t crtc_cfg;
382 uint32_t cursor_cfg;
383 uint32_t gpio_ext;
384 uint32_t crtc_830;
385 uint32_t crtc_834;
386 uint32_t crtc_850;
387 uint32_t crtc_eng_ctrl;
388
389 /* PRAMDAC regs */
390 uint32_t nv10_cursync;
391 struct nouveau_pll_vals pllvals;
392 uint32_t ramdac_gen_ctrl;
393 uint32_t ramdac_630;
394 uint32_t ramdac_634;
395 uint32_t tv_setup;
396 uint32_t tv_vtotal;
397 uint32_t tv_vskew;
398 uint32_t tv_vsync_delay;
399 uint32_t tv_htotal;
400 uint32_t tv_hskew;
401 uint32_t tv_hsync_delay;
402 uint32_t tv_hsync_delay2;
403 uint32_t fp_horiz_regs[7];
404 uint32_t fp_vert_regs[7];
405 uint32_t dither;
406 uint32_t fp_control;
407 uint32_t dither_regs[6];
408 uint32_t fp_debug_0;
409 uint32_t fp_debug_1;
410 uint32_t fp_debug_2;
411 uint32_t fp_margin_color;
412 uint32_t ramdac_8c0;
413 uint32_t ramdac_a20;
414 uint32_t ramdac_a24;
415 uint32_t ramdac_a34;
416 uint32_t ctv_regs[38];
417};
418
419struct nv04_output_reg {
420 uint32_t output;
421 int head;
422};
423
424struct nv04_mode_state {
425 uint32_t bpp;
426 uint32_t width;
427 uint32_t height;
428 uint32_t interlace;
429 uint32_t repaint0;
430 uint32_t repaint1;
431 uint32_t screen;
432 uint32_t scale;
433 uint32_t dither;
434 uint32_t extra;
435 uint32_t fifo;
436 uint32_t pixel;
437 uint32_t horiz;
438 int arbitration0;
439 int arbitration1;
440 uint32_t pll;
441 uint32_t pllB;
442 uint32_t vpll;
443 uint32_t vpll2;
444 uint32_t vpllB;
445 uint32_t vpll2B;
446 uint32_t pllsel;
447 uint32_t sel_clk;
448 uint32_t general;
449 uint32_t crtcOwner;
450 uint32_t head;
451 uint32_t head2;
452 uint32_t cursorConfig;
453 uint32_t cursor0;
454 uint32_t cursor1;
455 uint32_t cursor2;
456 uint32_t timingH;
457 uint32_t timingV;
458 uint32_t displayV;
459 uint32_t crtcSync;
460
461 struct nv04_crtc_reg crtc_reg[2];
462};
463
464enum nouveau_card_type {
465 NV_04 = 0x00,
466 NV_10 = 0x10,
467 NV_20 = 0x20,
468 NV_30 = 0x30,
469 NV_40 = 0x40,
470 NV_50 = 0x50,
471};
472
473struct drm_nouveau_private {
474 struct drm_device *dev;
475 enum {
476 NOUVEAU_CARD_INIT_DOWN,
477 NOUVEAU_CARD_INIT_DONE,
478 NOUVEAU_CARD_INIT_FAILED
479 } init_state;
480
481 /* the card type, takes NV_* as values */
482 enum nouveau_card_type card_type;
483 /* exact chipset, derived from NV_PMC_BOOT_0 */
484 int chipset;
485 int flags;
486
487 void __iomem *mmio;
488 void __iomem *ramin;
489 uint32_t ramin_size;
490
491 struct workqueue_struct *wq;
492 struct work_struct irq_work;
493
494 struct list_head vbl_waiting;
495
496 struct {
497 struct ttm_global_reference mem_global_ref;
498 struct ttm_bo_global_ref bo_global_ref;
499 struct ttm_bo_device bdev;
500 spinlock_t bo_list_lock;
501 struct list_head bo_list;
502 atomic_t validate_sequence;
503 } ttm;
504
505 struct fb_info *fbdev_info;
506
507 int fifo_alloc_count;
508 struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
509
510 struct nouveau_engine engine;
511 struct nouveau_channel *channel;
512
513 /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
514 struct nouveau_gpuobj *ramht;
515 uint32_t ramin_rsvd_vram;
516 uint32_t ramht_offset;
517 uint32_t ramht_size;
518 uint32_t ramht_bits;
519 uint32_t ramfc_offset;
520 uint32_t ramfc_size;
521 uint32_t ramro_offset;
522 uint32_t ramro_size;
523
524 /* base physical addresses */
525 uint64_t fb_phys;
526 uint64_t fb_available_size;
527 uint64_t fb_mappable_pages;
528 uint64_t fb_aper_free;
529
530 struct {
531 enum {
532 NOUVEAU_GART_NONE = 0,
533 NOUVEAU_GART_AGP,
534 NOUVEAU_GART_SGDMA
535 } type;
536 uint64_t aper_base;
537 uint64_t aper_size;
538 uint64_t aper_free;
539
540 struct nouveau_gpuobj *sg_ctxdma;
541 struct page *sg_dummy_page;
542 dma_addr_t sg_dummy_bus;
543
544 /* nottm hack */
545 struct drm_ttm_backend *sg_be;
546 unsigned long sg_handle;
547 } gart_info;
548
549 /* G8x/G9x virtual address space */
550 uint64_t vm_gart_base;
551 uint64_t vm_gart_size;
552 uint64_t vm_vram_base;
553 uint64_t vm_vram_size;
554 uint64_t vm_end;
555 struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
556 int vm_vram_pt_nr;
557
558 /* the mtrr covering the FB */
559 int fb_mtrr;
560
561 struct mem_block *ramin_heap;
562
563 /* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
564 uint32_t ctx_table_size;
565 struct nouveau_gpuobj_ref *ctx_table;
566
567 struct list_head gpuobj_list;
568
569 struct nvbios VBIOS;
570 struct nouveau_bios_info *vbios;
571
572 struct nv04_mode_state mode_reg;
573 struct nv04_mode_state saved_reg;
574 uint32_t saved_vga_font[4][16384];
575 uint32_t crtc_owner;
576 uint32_t dac_users[4];
577
578 struct nouveau_suspend_resume {
579 uint32_t fifo_mode;
580 uint32_t graph_ctx_control;
581 uint32_t graph_state;
582 uint32_t *ramin_copy;
583 uint64_t ramin_size;
584 } susres;
585
586 struct backlight_device *backlight;
587 bool acpi_dsm;
588
589 struct nouveau_channel *evo;
590
591 struct {
592 struct dentry *channel_root;
593 } debugfs;
594};
595
596static inline struct drm_nouveau_private *
597nouveau_bdev(struct ttm_bo_device *bd)
598{
599 return container_of(bd, struct drm_nouveau_private, ttm.bdev);
600}
601
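/* Take a reference on @ref (which may be NULL) and store it in *@pnvbo,
 * dropping any reference previously held there. */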
602static inline int
603nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
604{
605 struct nouveau_bo *prev;
606
607 if (!pnvbo)
608 return -EINVAL;
609 prev = *pnvbo;
610
611 *pnvbo = ref ? nouveau_bo(ttm_bo_reference(&ref->bo)) : NULL;
612 if (prev) {
613 struct ttm_buffer_object *bo = &prev->bo;
614
615 ttm_bo_unref(&bo);
616 }
617
618 return 0;
619}
620
621#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
622 struct drm_nouveau_private *nv = dev->dev_private; \
623 if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
624 NV_ERROR(dev, "called without init\n"); \
625 return -EINVAL; \
626 } \
627} while (0)
628
629#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
630 struct drm_nouveau_private *nv = dev->dev_private; \
631 if (!nouveau_channel_owner(dev, (cl), (id))) { \
632 NV_ERROR(dev, "pid %d doesn't own channel %d\n", \
633 DRM_CURRENTPID, (id)); \
634 return -EPERM; \
635 } \
636 (ch) = nv->fifos[(id)]; \
637} while (0)
638
639/* nouveau_drv.c */
640extern int nouveau_noagp;
641extern int nouveau_duallink;
642extern int nouveau_uscript_lvds;
643extern int nouveau_uscript_tmds;
644extern int nouveau_vram_pushbuf;
645extern int nouveau_vram_notify;
646extern int nouveau_fbpercrtc;
647extern char *nouveau_tv_norm;
648extern int nouveau_reg_debug;
649extern char *nouveau_vbios;
650
651/* nouveau_state.c */
652extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
653extern int nouveau_load(struct drm_device *, unsigned long flags);
654extern int nouveau_firstopen(struct drm_device *);
655extern void nouveau_lastclose(struct drm_device *);
656extern int nouveau_unload(struct drm_device *);
657extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
658 struct drm_file *);
659extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
660 struct drm_file *);
661extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
662 uint32_t reg, uint32_t mask, uint32_t val);
663extern bool nouveau_wait_for_idle(struct drm_device *);
664extern int nouveau_card_init(struct drm_device *);
665extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
666 struct drm_file *);
667extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
668 struct drm_file *);
669extern int nouveau_ioctl_resume(struct drm_device *, void *data,
670 struct drm_file *);
671
672/* nouveau_mem.c */
673extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
674 uint64_t size);
675extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
676 uint64_t size, int align2,
677 struct drm_file *, int tail);
678extern void nouveau_mem_takedown(struct mem_block **heap);
679extern void nouveau_mem_free_block(struct mem_block *);
680extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
681extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
682extern int nouveau_mem_init(struct drm_device *);
683extern int nouveau_mem_init_agp(struct drm_device *);
684extern void nouveau_mem_close(struct drm_device *);
685extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
686 uint32_t size, uint32_t flags,
687 uint64_t phys);
688extern void nv50_mem_vm_unbind(struct drm_device *, uint64_t virt,
689 uint32_t size);
690
691/* nouveau_notifier.c */
692extern int nouveau_notifier_init_channel(struct nouveau_channel *);
693extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
694extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
695 int count, uint32_t *offset);
696extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
697extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
698 struct drm_file *);
699extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
700 struct drm_file *);
701
702/* nouveau_channel.c */
703extern struct drm_ioctl_desc nouveau_ioctls[];
704extern int nouveau_max_ioctl;
705extern void nouveau_channel_cleanup(struct drm_device *, struct drm_file *);
706extern int nouveau_channel_owner(struct drm_device *, struct drm_file *,
707 int channel);
708extern int nouveau_channel_alloc(struct drm_device *dev,
709 struct nouveau_channel **chan,
710 struct drm_file *file_priv,
711 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
712extern void nouveau_channel_free(struct nouveau_channel *);
713extern int nouveau_channel_idle(struct nouveau_channel *chan);
714
715/* nouveau_object.c */
716extern int nouveau_gpuobj_early_init(struct drm_device *);
717extern int nouveau_gpuobj_init(struct drm_device *);
718extern void nouveau_gpuobj_takedown(struct drm_device *);
719extern void nouveau_gpuobj_late_takedown(struct drm_device *);
720extern int nouveau_gpuobj_suspend(struct drm_device *dev);
721extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
722extern void nouveau_gpuobj_resume(struct drm_device *dev);
723extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
724 uint32_t vram_h, uint32_t tt_h);
725extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
726extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
727 uint32_t size, int align, uint32_t flags,
728 struct nouveau_gpuobj **);
729extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
730extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
731 uint32_t handle, struct nouveau_gpuobj *,
732 struct nouveau_gpuobj_ref **);
733extern int nouveau_gpuobj_ref_del(struct drm_device *,
734 struct nouveau_gpuobj_ref **);
735extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
736 struct nouveau_gpuobj_ref **ref_ret);
737extern int nouveau_gpuobj_new_ref(struct drm_device *,
738 struct nouveau_channel *alloc_chan,
739 struct nouveau_channel *ref_chan,
740 uint32_t handle, uint32_t size, int align,
741 uint32_t flags, struct nouveau_gpuobj_ref **);
742extern int nouveau_gpuobj_new_fake(struct drm_device *,
743 uint32_t p_offset, uint32_t b_offset,
744 uint32_t size, uint32_t flags,
745 struct nouveau_gpuobj **,
746 struct nouveau_gpuobj_ref**);
747extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
748 uint64_t offset, uint64_t size, int access,
749 int target, struct nouveau_gpuobj **);
750extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
751 uint64_t offset, uint64_t size,
752 int access, struct nouveau_gpuobj **,
753 uint32_t *o_ret);
754extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
755 struct nouveau_gpuobj **);
756extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
757 struct drm_file *);
758extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
759 struct drm_file *);
760
761/* nouveau_irq.c */
762extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
763extern void nouveau_irq_preinstall(struct drm_device *);
764extern int nouveau_irq_postinstall(struct drm_device *);
765extern void nouveau_irq_uninstall(struct drm_device *);
766
767/* nouveau_sgdma.c */
768extern int nouveau_sgdma_init(struct drm_device *);
769extern void nouveau_sgdma_takedown(struct drm_device *);
770extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
771 uint32_t *page);
772extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
773
774/* nouveau_debugfs.c */
775#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
776extern int nouveau_debugfs_init(struct drm_minor *);
777extern void nouveau_debugfs_takedown(struct drm_minor *);
778extern int nouveau_debugfs_channel_init(struct nouveau_channel *);
779extern void nouveau_debugfs_channel_fini(struct nouveau_channel *);
780#else
781static inline int
782nouveau_debugfs_init(struct drm_minor *minor)
783{
784 return 0;
785}
786
787static inline void nouveau_debugfs_takedown(struct drm_minor *minor)
788{
789}
790
791static inline int
792nouveau_debugfs_channel_init(struct nouveau_channel *chan)
793{
794 return 0;
795}
796
797static inline void
798nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
799{
800}
801#endif
802
803/* nouveau_dma.c */
804extern int nouveau_dma_init(struct nouveau_channel *);
805extern int nouveau_dma_wait(struct nouveau_channel *, int size);
806
807/* nouveau_acpi.c */
808#ifdef CONFIG_ACPI
809extern int nouveau_hybrid_setup(struct drm_device *dev);
810extern bool nouveau_dsm_probe(struct drm_device *dev);
811#else
812static inline int nouveau_hybrid_setup(struct drm_device *dev)
813{
814 return 0;
815}
816static inline bool nouveau_dsm_probe(struct drm_device *dev)
817{
818 return false;
819}
820#endif
821
822/* nouveau_backlight.c */
823#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
824extern int nouveau_backlight_init(struct drm_device *);
825extern void nouveau_backlight_exit(struct drm_device *);
826#else
827static inline int nouveau_backlight_init(struct drm_device *dev)
828{
829 return 0;
830}
831
832static inline void nouveau_backlight_exit(struct drm_device *dev) { }
833#endif
834
835/* nouveau_bios.c */
836extern int nouveau_bios_init(struct drm_device *);
837extern void nouveau_bios_takedown(struct drm_device *dev);
838extern int nouveau_run_vbios_init(struct drm_device *);
839extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
840 struct dcb_entry *);
841extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
842 enum dcb_gpio_tag);
843extern struct dcb_connector_table_entry *
844nouveau_bios_connector_entry(struct drm_device *, int index);
845extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
846 struct pll_lims *);
847extern int nouveau_bios_run_display_table(struct drm_device *,
848 struct dcb_entry *,
849 uint32_t script, int pxclk);
850extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
851 int *length);
852extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
853extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
854extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
855 bool *dl, bool *if_is_24bit);
856extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
857 int head, int pxclk);
858extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
859 enum LVDS_script, int pxclk);
860
861/* nouveau_ttm.c */
862int nouveau_ttm_global_init(struct drm_nouveau_private *);
863void nouveau_ttm_global_release(struct drm_nouveau_private *);
864int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
865
866/* nouveau_dp.c */
867int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
868 uint8_t *data, int data_nr);
869bool nouveau_dp_detect(struct drm_encoder *);
870bool nouveau_dp_link_train(struct drm_encoder *);
871
872/* nv04_fb.c */
873extern int nv04_fb_init(struct drm_device *);
874extern void nv04_fb_takedown(struct drm_device *);
875
876/* nv10_fb.c */
877extern int nv10_fb_init(struct drm_device *);
878extern void nv10_fb_takedown(struct drm_device *);
879
880/* nv40_fb.c */
881extern int nv40_fb_init(struct drm_device *);
882extern void nv40_fb_takedown(struct drm_device *);
883
884/* nv04_fifo.c */
885extern int nv04_fifo_init(struct drm_device *);
886extern void nv04_fifo_disable(struct drm_device *);
887extern void nv04_fifo_enable(struct drm_device *);
888extern bool nv04_fifo_reassign(struct drm_device *, bool);
889extern int nv04_fifo_channel_id(struct drm_device *);
890extern int nv04_fifo_create_context(struct nouveau_channel *);
891extern void nv04_fifo_destroy_context(struct nouveau_channel *);
892extern int nv04_fifo_load_context(struct nouveau_channel *);
893extern int nv04_fifo_unload_context(struct drm_device *);
894
895/* nv10_fifo.c */
896extern int nv10_fifo_init(struct drm_device *);
897extern int nv10_fifo_channel_id(struct drm_device *);
898extern int nv10_fifo_create_context(struct nouveau_channel *);
899extern void nv10_fifo_destroy_context(struct nouveau_channel *);
900extern int nv10_fifo_load_context(struct nouveau_channel *);
901extern int nv10_fifo_unload_context(struct drm_device *);
902
903/* nv40_fifo.c */
904extern int nv40_fifo_init(struct drm_device *);
905extern int nv40_fifo_create_context(struct nouveau_channel *);
906extern void nv40_fifo_destroy_context(struct nouveau_channel *);
907extern int nv40_fifo_load_context(struct nouveau_channel *);
908extern int nv40_fifo_unload_context(struct drm_device *);
909
910/* nv50_fifo.c */
911extern int nv50_fifo_init(struct drm_device *);
912extern void nv50_fifo_takedown(struct drm_device *);
913extern int nv50_fifo_channel_id(struct drm_device *);
914extern int nv50_fifo_create_context(struct nouveau_channel *);
915extern void nv50_fifo_destroy_context(struct nouveau_channel *);
916extern int nv50_fifo_load_context(struct nouveau_channel *);
917extern int nv50_fifo_unload_context(struct drm_device *);
918
919/* nv04_graph.c */
920extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
921extern int nv04_graph_init(struct drm_device *);
922extern void nv04_graph_takedown(struct drm_device *);
923extern void nv04_graph_fifo_access(struct drm_device *, bool);
924extern struct nouveau_channel *nv04_graph_channel(struct drm_device *);
925extern int nv04_graph_create_context(struct nouveau_channel *);
926extern void nv04_graph_destroy_context(struct nouveau_channel *);
927extern int nv04_graph_load_context(struct nouveau_channel *);
928extern int nv04_graph_unload_context(struct drm_device *);
929extern void nv04_graph_context_switch(struct drm_device *);
930
931/* nv10_graph.c */
932extern struct nouveau_pgraph_object_class nv10_graph_grclass[];
933extern int nv10_graph_init(struct drm_device *);
934extern void nv10_graph_takedown(struct drm_device *);
935extern struct nouveau_channel *nv10_graph_channel(struct drm_device *);
936extern int nv10_graph_create_context(struct nouveau_channel *);
937extern void nv10_graph_destroy_context(struct nouveau_channel *);
938extern int nv10_graph_load_context(struct nouveau_channel *);
939extern int nv10_graph_unload_context(struct drm_device *);
940extern void nv10_graph_context_switch(struct drm_device *);
941
942/* nv20_graph.c */
943extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
944extern struct nouveau_pgraph_object_class nv30_graph_grclass[];
945extern int nv20_graph_create_context(struct nouveau_channel *);
946extern void nv20_graph_destroy_context(struct nouveau_channel *);
947extern int nv20_graph_load_context(struct nouveau_channel *);
948extern int nv20_graph_unload_context(struct drm_device *);
949extern int nv20_graph_init(struct drm_device *);
950extern void nv20_graph_takedown(struct drm_device *);
951extern int nv30_graph_init(struct drm_device *);
952
953/* nv40_graph.c */
954extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
955extern int nv40_graph_init(struct drm_device *);
956extern void nv40_graph_takedown(struct drm_device *);
957extern struct nouveau_channel *nv40_graph_channel(struct drm_device *);
958extern int nv40_graph_create_context(struct nouveau_channel *);
959extern void nv40_graph_destroy_context(struct nouveau_channel *);
960extern int nv40_graph_load_context(struct nouveau_channel *);
961extern int nv40_graph_unload_context(struct drm_device *);
962extern int nv40_grctx_init(struct drm_device *);
963extern void nv40_grctx_fini(struct drm_device *);
964extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
965
966/* nv50_graph.c */
967extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
968extern int nv50_graph_init(struct drm_device *);
969extern void nv50_graph_takedown(struct drm_device *);
970extern void nv50_graph_fifo_access(struct drm_device *, bool);
971extern struct nouveau_channel *nv50_graph_channel(struct drm_device *);
972extern int nv50_graph_create_context(struct nouveau_channel *);
973extern void nv50_graph_destroy_context(struct nouveau_channel *);
974extern int nv50_graph_load_context(struct nouveau_channel *);
975extern int nv50_graph_unload_context(struct drm_device *);
976extern void nv50_graph_context_switch(struct drm_device *);
977
978/* nv04_instmem.c */
979extern int nv04_instmem_init(struct drm_device *);
980extern void nv04_instmem_takedown(struct drm_device *);
981extern int nv04_instmem_suspend(struct drm_device *);
982extern void nv04_instmem_resume(struct drm_device *);
983extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
984 uint32_t *size);
985extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
986extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
987extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
988extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
989extern void nv04_instmem_finish_access(struct drm_device *);
990
991/* nv50_instmem.c */
992extern int nv50_instmem_init(struct drm_device *);
993extern void nv50_instmem_takedown(struct drm_device *);
994extern int nv50_instmem_suspend(struct drm_device *);
995extern void nv50_instmem_resume(struct drm_device *);
996extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
997 uint32_t *size);
998extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
999extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
1000extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
1001extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
1002extern void nv50_instmem_finish_access(struct drm_device *);
1003
1004/* nv04_mc.c */
1005extern int nv04_mc_init(struct drm_device *);
1006extern void nv04_mc_takedown(struct drm_device *);
1007
1008/* nv40_mc.c */
1009extern int nv40_mc_init(struct drm_device *);
1010extern void nv40_mc_takedown(struct drm_device *);
1011
1012/* nv50_mc.c */
1013extern int nv50_mc_init(struct drm_device *);
1014extern void nv50_mc_takedown(struct drm_device *);
1015
1016/* nv04_timer.c */
1017extern int nv04_timer_init(struct drm_device *);
1018extern uint64_t nv04_timer_read(struct drm_device *);
1019extern void nv04_timer_takedown(struct drm_device *);
1020
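/* nouveau_ioc32.c */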
1021extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
1022 unsigned long arg);
1023
1024/* nv04_dac.c */
1025extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
1026extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
1027 struct drm_connector *connector);
1028extern int nv04_dac_output_offset(struct drm_encoder *encoder);
1029extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
1030
1031/* nv04_dfp.c */
1032extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
1033extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
1034extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
1035 int head, bool dl);
1036extern void nv04_dfp_disable(struct drm_device *dev, int head);
1037extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
1038
1039/* nv04_tv.c */
1040extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
1041extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1042
1043/* nv17_tv.c */
1044extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
1045extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
1046 struct drm_connector *connector,
1047 uint32_t pin_mask);
1048
1049/* nv04_display.c */
1050extern int nv04_display_create(struct drm_device *);
1051extern void nv04_display_destroy(struct drm_device *);
1052extern void nv04_display_restore(struct drm_device *);
1053
1054/* nv04_crtc.c */
1055extern int nv04_crtc_create(struct drm_device *, int index);
1056
1057/* nouveau_bo.c */
1058extern struct ttm_bo_driver nouveau_bo_driver;
1059extern int nouveau_bo_new(struct drm_device *, struct nouveau_channel *,
1060 int size, int align, uint32_t flags,
1061 uint32_t tile_mode, uint32_t tile_flags,
1062 bool no_vm, bool mappable, struct nouveau_bo **);
1063extern int nouveau_bo_pin(struct nouveau_bo *, uint32_t flags);
1064extern int nouveau_bo_unpin(struct nouveau_bo *);
1065extern int nouveau_bo_map(struct nouveau_bo *);
1066extern void nouveau_bo_unmap(struct nouveau_bo *);
1067extern void nouveau_bo_placement_set(struct nouveau_bo *, uint32_t memtype);
1068extern u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index);
1069extern void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val);
1070extern u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index);
1071extern void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val);
1072
1073/* nouveau_fence.c */
1074struct nouveau_fence;
1075extern int nouveau_fence_init(struct nouveau_channel *);
1076extern void nouveau_fence_fini(struct nouveau_channel *);
1077extern void nouveau_fence_update(struct nouveau_channel *);
1078extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
1079 bool emit);
1080extern int nouveau_fence_emit(struct nouveau_fence *);
1081struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
1082extern bool nouveau_fence_signalled(void *obj, void *arg);
1083extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
1084extern int nouveau_fence_flush(void *obj, void *arg);
1085extern void nouveau_fence_unref(void **obj);
1086extern void *nouveau_fence_ref(void *obj);
1087extern void nouveau_fence_handler(struct drm_device *dev, int channel);
1088
1089/* nouveau_gem.c */
1090extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
1091 int size, int align, uint32_t flags,
1092 uint32_t tile_mode, uint32_t tile_flags,
1093 bool no_vm, bool mappable, struct nouveau_bo **);
1094extern int nouveau_gem_object_new(struct drm_gem_object *);
1095extern void nouveau_gem_object_del(struct drm_gem_object *);
1096extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
1097 struct drm_file *);
1098extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
1099 struct drm_file *);
1100extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
1101 struct drm_file *);
1102extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
1103 struct drm_file *);
1104extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
1105 struct drm_file *);
1106extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
1107 struct drm_file *);
1108extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
1109 struct drm_file *);
1110extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
1111 struct drm_file *);
1112extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
1113 struct drm_file *);
1114extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
1115 struct drm_file *);
1116
1117/* nv17_gpio.c */
1118int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
1119int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
1120
1121#ifndef ioread32_native
1122#ifdef __BIG_ENDIAN
1123#define ioread16_native ioread16be
1124#define iowrite16_native iowrite16be
1125#define ioread32_native ioread32be
1126#define iowrite32_native iowrite32be
1127#else /* def __BIG_ENDIAN */
1128#define ioread16_native ioread16
1129#define iowrite16_native iowrite16
1130#define ioread32_native ioread32
1131#define iowrite32_native iowrite32
1132#endif /* def __BIG_ENDIAN else */
1133#endif /* !ioread32_native */
1134
1135/* channel control reg access */
1136static inline u32 nvchan_rd32(struct nouveau_channel *chan, unsigned reg)
1137{
1138 return ioread32_native(chan->user + reg);
1139}
1140
1141static inline void nvchan_wr32(struct nouveau_channel *chan,
1142 unsigned reg, u32 val)
1143{
1144 iowrite32_native(val, chan->user + reg);
1145}
1146
1147/* register access */
1148static inline u32 nv_rd32(struct drm_device *dev, unsigned reg)
1149{
1150 struct drm_nouveau_private *dev_priv = dev->dev_private;
1151 return ioread32_native(dev_priv->mmio + reg);
1152}
1153
1154static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
1155{
1156 struct drm_nouveau_private *dev_priv = dev->dev_private;
1157 iowrite32_native(val, dev_priv->mmio + reg);
1158}
1159
1160static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
1161{
1162 struct drm_nouveau_private *dev_priv = dev->dev_private;
1163 return ioread8(dev_priv->mmio + reg);
1164}
1165
1166static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
1167{
1168 struct drm_nouveau_private *dev_priv = dev->dev_private;
1169 iowrite8(val, dev_priv->mmio + reg);
1170}
1171
1172#define nv_wait(reg, mask, val) \
1173 nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
1174
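For context, nouveau_wait_until() is implemented elsewhere in the driver; a minimal sketch of the polling idiom it wraps, reusing the nv_rd32() accessor and the nv04_timer_read() ptimer hook declared above (illustrative only, not the driver's implementation):

	/* Poll `reg` until (value & mask) == val or `timeout` ns elapse. */
	static inline bool
	example_wait_until(struct drm_device *dev, uint64_t timeout,
			   uint32_t reg, uint32_t mask, uint32_t val)
	{
		uint64_t start = nv04_timer_read(dev);

		do {
			if ((nv_rd32(dev, reg) & mask) == val)
				return true;
		} while (nv04_timer_read(dev) - start < timeout);
		return false;
	}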
1175/* PRAMIN access */
1176static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
1177{
1178 struct drm_nouveau_private *dev_priv = dev->dev_private;
1179 return ioread32_native(dev_priv->ramin + offset);
1180}
1181
1182static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
1183{
1184 struct drm_nouveau_private *dev_priv = dev->dev_private;
1185 iowrite32_native(val, dev_priv->ramin + offset);
1186}
1187
1188/* object access */
1189static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
1190 unsigned index)
1191{
1192 return nv_ri32(dev, obj->im_pramin->start + index * 4);
1193}
1194
1195static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
1196 unsigned index, u32 val)
1197{
1198 nv_wi32(dev, obj->im_pramin->start + index * 4, val);
1199}
1200
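As a usage illustration (hypothetical gpuobj pointer and value): each index addresses one 32-bit word of the object's PRAMIN allocation, so dword 0 of a gpuobj is written and read back with:

	nv_wo32(dev, gpuobj, 0, 0x00003000);	/* illustrative value */
	val = nv_ro32(dev, gpuobj, 0);		/* reads 0x00003000 back */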
1201/*
1202 * Logging
1203 * Argument d is (struct drm_device *).
1204 */
1205#define NV_PRINTK(level, d, fmt, arg...) \
1206 printk(level "[" DRM_NAME "] " DRIVER_NAME " %s: " fmt, \
1207 pci_name(d->pdev), ##arg)
1208#ifndef NV_DEBUG_NOTRACE
1209#define NV_DEBUG(d, fmt, arg...) do { \
1210 if (drm_debug) { \
1211 NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
1212 __LINE__, ##arg); \
1213 } \
1214} while (0)
1215#else
1216#define NV_DEBUG(d, fmt, arg...) do { \
1217 if (drm_debug) \
1218 NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
1219} while (0)
1220#endif
1221#define NV_ERROR(d, fmt, arg...) NV_PRINTK(KERN_ERR, d, fmt, ##arg)
1222#define NV_INFO(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1223#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
1224#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
1225#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
1226
1227/* nouveau_reg_debug bitmask */
1228enum {
1229 NOUVEAU_REG_DEBUG_MC = 0x1,
1230 NOUVEAU_REG_DEBUG_VIDEO = 0x2,
1231 NOUVEAU_REG_DEBUG_FB = 0x4,
1232 NOUVEAU_REG_DEBUG_EXTDEV = 0x8,
1233 NOUVEAU_REG_DEBUG_CRTC = 0x10,
1234 NOUVEAU_REG_DEBUG_RAMDAC = 0x20,
1235 NOUVEAU_REG_DEBUG_VGACRTC = 0x40,
1236 NOUVEAU_REG_DEBUG_RMVIO = 0x80,
1237 NOUVEAU_REG_DEBUG_VGAATTR = 0x100,
1238 NOUVEAU_REG_DEBUG_EVO = 0x200,
1239};
1240
1241#define NV_REG_DEBUG(type, dev, fmt, arg...) do { \
1242 if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_##type) \
1243 NV_PRINTK(KERN_DEBUG, dev, "%s: " fmt, __func__, ##arg); \
1244} while (0)
1245
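For example (hypothetical message and values), a RAMDAC access is only logged when the user sets the matching bit in the nouveau_reg_debug parameter:

	NV_REG_DEBUG(RAMDAC, dev, "head %d: wrote 0x%08x\n", head, val);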
1246static inline bool
1247nv_two_heads(struct drm_device *dev)
1248{
1249 struct drm_nouveau_private *dev_priv = dev->dev_private;
1250 const int impl = dev->pci_device & 0x0ff0;
1251
1252 if (dev_priv->card_type >= NV_10 && impl != 0x0100 &&
1253 impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
1254 return true;
1255
1256 return false;
1257}
1258
1259static inline bool
1260nv_gf4_disp_arch(struct drm_device *dev)
1261{
1262 return nv_two_heads(dev) && (dev->pci_device & 0x0ff0) != 0x0110;
1263}
1264
1265static inline bool
1266nv_two_reg_pll(struct drm_device *dev)
1267{
1268 struct drm_nouveau_private *dev_priv = dev->dev_private;
1269 const int impl = dev->pci_device & 0x0ff0;
1270
1271 if (impl == 0x0310 || impl == 0x0340 || dev_priv->card_type >= NV_40)
1272 return true;
1273 return false;
1274}
1275
1276#define NV50_NVSW 0x0000506e
1277#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
1278#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
1279#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
1280#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
1281#define NV50_NVSW_DMA_VBLSEM 0x0000018c
1282#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
1283#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
1284#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
1285
1286#endif /* __NOUVEAU_DRV_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
new file mode 100644
index 000000000000..bc4a24029ed1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_ENCODER_H__
28#define __NOUVEAU_ENCODER_H__
29
30#include "drm_encoder_slave.h"
31#include "nouveau_drv.h"
32
33#define NV_DPMS_CLEARED 0x80
34
35struct nouveau_encoder {
36 struct drm_encoder_slave base;
37
38 struct dcb_entry *dcb;
39 int or;
40
41 struct drm_display_mode mode;
42 int last_dpms;
43
44 struct nv04_output_reg restore;
45
46 void (*disconnect)(struct nouveau_encoder *encoder);
47
48 union {
49 struct {
50 int dpcd_version;
51 int link_nr;
52 int link_bw;
53 } dp;
54 };
55};
56
57static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
58{
59 struct drm_encoder_slave *slave = to_encoder_slave(enc);
60
61 return container_of(slave, struct nouveau_encoder, base);
62}
63
64static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
65{
66 return &enc->base.base;
67}
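These two helpers convert between the DRM core's struct drm_encoder and the driver's wrapper by way of the intermediate drm_encoder_slave; a round trip (hypothetical `enc` pointer) is a no-op:

	struct nouveau_encoder *nv_encoder = nouveau_encoder(enc);
	/* to_drm_encoder(nv_encoder) == enc always holds */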
68
69struct nouveau_connector *
70nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
71int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
72int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
73
74struct bit_displayport_encoder_table {
75 uint32_t match;
76 uint8_t record_nr;
77 uint8_t unknown;
78 uint16_t script0;
79 uint16_t script1;
80 uint16_t unknown_table;
81} __attribute__ ((packed));
82
83struct bit_displayport_encoder_table_entry {
84 uint8_t vs_level;
85 uint8_t pre_level;
86 uint8_t reg0;
87 uint8_t reg1;
88 uint8_t reg2;
89} __attribute__ ((packed));
90
91#endif /* __NOUVEAU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
new file mode 100644
index 000000000000..4a3f31aa1949
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FB_H__
28#define __NOUVEAU_FB_H__
29
30struct nouveau_framebuffer {
31 struct drm_framebuffer base;
32 struct nouveau_bo *nvbo;
33};
34
35static inline struct nouveau_framebuffer *
36nouveau_framebuffer(struct drm_framebuffer *fb)
37{
38 return container_of(fb, struct nouveau_framebuffer, base);
39}
40
41extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
42
43struct drm_framebuffer *
44nouveau_framebuffer_create(struct drm_device *, struct nouveau_bo *,
45 struct drm_mode_fb_cmd *);
46
47#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
new file mode 100644
index 000000000000..36e8c5e4503a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -0,0 +1,380 @@
1/*
2 * Copyright © 2007 David Airlie
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * David Airlie
25 */
26
27#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/errno.h>
30#include <linux/string.h>
31#include <linux/mm.h>
32#include <linux/tty.h>
33#include <linux/slab.h>
34#include <linux/sysrq.h>
35#include <linux/delay.h>
36#include <linux/fb.h>
37#include <linux/init.h>
38#include <linux/screen_info.h>
39
40#include "drmP.h"
41#include "drm.h"
42#include "drm_crtc.h"
43#include "drm_crtc_helper.h"
44#include "drm_fb_helper.h"
45#include "nouveau_drv.h"
46#include "nouveau_drm.h"
47#include "nouveau_crtc.h"
48#include "nouveau_fb.h"
49#include "nouveau_fbcon.h"
50#include "nouveau_dma.h"
51
52static int
53nouveau_fbcon_sync(struct fb_info *info)
54{
55 struct nouveau_fbcon_par *par = info->par;
56 struct drm_device *dev = par->dev;
57 struct drm_nouveau_private *dev_priv = dev->dev_private;
58 struct nouveau_channel *chan = dev_priv->channel;
59 int ret, i;
60
61 if (!chan->accel_done ||
62 info->state != FBINFO_STATE_RUNNING ||
63 info->flags & FBINFO_HWACCEL_DISABLED)
64 return 0;
65
66 if (RING_SPACE(chan, 4)) {
67 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
68 info->flags |= FBINFO_HWACCEL_DISABLED;
69 return 0;
70 }
71
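	/*
	 * Queue a NOTIFY (0x0104) plus a NOP (0x0100), mark the M2MF
	 * notifier dword pending (0xffffffff) and kick the ring; the
	 * GPU clears that dword when it reaches the notify, which the
	 * poll loop below waits for.
	 */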
72 BEGIN_RING(chan, 0, 0x0104, 1);
73 OUT_RING(chan, 0);
74 BEGIN_RING(chan, 0, 0x0100, 1);
75 OUT_RING(chan, 0);
76 nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy + 3, 0xffffffff);
77 FIRE_RING(chan);
78
79 ret = -EBUSY;
80 for (i = 0; i < 100000; i++) {
81 if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy + 3)) {
82 ret = 0;
83 break;
84 }
85 DRM_UDELAY(1);
86 }
87
88 if (ret) {
89 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
90 info->flags |= FBINFO_HWACCEL_DISABLED;
91 return 0;
92 }
93
94 chan->accel_done = false;
95 return 0;
96}
97
98static struct fb_ops nouveau_fbcon_ops = {
99 .owner = THIS_MODULE,
100 .fb_check_var = drm_fb_helper_check_var,
101 .fb_set_par = drm_fb_helper_set_par,
102 .fb_setcolreg = drm_fb_helper_setcolreg,
103 .fb_fillrect = cfb_fillrect,
104 .fb_copyarea = cfb_copyarea,
105 .fb_imageblit = cfb_imageblit,
106 .fb_sync = nouveau_fbcon_sync,
107 .fb_pan_display = drm_fb_helper_pan_display,
108 .fb_blank = drm_fb_helper_blank,
109 .fb_setcmap = drm_fb_helper_setcmap,
110};
111
112static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
113 u16 blue, int regno)
114{
115 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
116
117 nv_crtc->lut.r[regno] = red;
118 nv_crtc->lut.g[regno] = green;
119 nv_crtc->lut.b[regno] = blue;
120}
121
122static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
123 u16 *blue, int regno)
124{
125 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
126
127 *red = nv_crtc->lut.r[regno];
128 *green = nv_crtc->lut.g[regno];
129 *blue = nv_crtc->lut.b[regno];
130}
131
132static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
133 .gamma_set = nouveau_fbcon_gamma_set,
134 .gamma_get = nouveau_fbcon_gamma_get
135};
136
137#if defined(__i386__) || defined(__x86_64__)
138static bool
139nouveau_fbcon_has_vesafb_or_efifb(struct drm_device *dev)
140{
141 struct pci_dev *pdev = dev->pdev;
142 int ramin;
143
144 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB &&
145 screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
146 return false;
147
148 if (screen_info.lfb_base < pci_resource_start(pdev, 1))
149 goto not_fb;
150
151 if (screen_info.lfb_base + screen_info.lfb_size >=
152 pci_resource_start(pdev, 1) + pci_resource_len(pdev, 1))
153 goto not_fb;
154
155 return true;
156not_fb:
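	/* Not within BAR1 (the VRAM aperture); some VBIOSes place the
	 * boot framebuffer in the PRAMIN BAR instead -- resource 2, or
	 * 3 on chips where 2 is unused (see the aperture comment in
	 * nouveau_fbcon_create() below). */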
157 ramin = 2;
158 if (pci_resource_len(pdev, ramin) == 0) {
159 ramin = 3;
160 if (pci_resource_len(pdev, ramin) == 0)
161 return false;
162 }
163
164 if (screen_info.lfb_base < pci_resource_start(pdev, ramin))
165 return false;
166
167 if (screen_info.lfb_base + screen_info.lfb_size >=
168 pci_resource_start(pdev, ramin) + pci_resource_len(pdev, ramin))
169 return false;
170
171 return true;
172}
173#endif
174
175void
176nouveau_fbcon_zfill(struct drm_device *dev)
177{
178 struct drm_nouveau_private *dev_priv = dev->dev_private;
179 struct fb_info *info = dev_priv->fbdev_info;
180 struct fb_fillrect rect;
181
182	/* Clear the entire fbcon. The DRM core will program every connector
183	 * with its preferred mode. If the sizes differ, one display will
184 * quite likely have garbage around the console.
185 */
186 rect.dx = rect.dy = 0;
187 rect.width = info->var.xres_virtual;
188 rect.height = info->var.yres_virtual;
189 rect.color = 0;
190 rect.rop = ROP_COPY;
191 info->fbops->fb_fillrect(info, &rect);
192}
193
194static int
195nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
196 uint32_t fb_height, uint32_t surface_width,
197 uint32_t surface_height, uint32_t surface_depth,
198 uint32_t surface_bpp, struct drm_framebuffer **pfb)
199{
200 struct drm_nouveau_private *dev_priv = dev->dev_private;
201 struct fb_info *info;
202 struct nouveau_fbcon_par *par;
203 struct drm_framebuffer *fb;
204 struct nouveau_framebuffer *nouveau_fb;
205 struct nouveau_bo *nvbo;
206 struct drm_mode_fb_cmd mode_cmd;
207 struct device *device = &dev->pdev->dev;
208 int size, ret;
209
210 mode_cmd.width = surface_width;
211 mode_cmd.height = surface_height;
212
213 mode_cmd.bpp = surface_bpp;
214 mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
215 mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
216 mode_cmd.depth = surface_depth;
217
218 size = mode_cmd.pitch * mode_cmd.height;
219 size = ALIGN(size, PAGE_SIZE);
220
221 ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
222 0, 0x0000, false, true, &nvbo);
223 if (ret) {
224 NV_ERROR(dev, "failed to allocate framebuffer\n");
225 goto out;
226 }
227
228 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
229 if (ret) {
230 NV_ERROR(dev, "failed to pin fb: %d\n", ret);
231 nouveau_bo_ref(NULL, &nvbo);
232 goto out;
233 }
234
235 ret = nouveau_bo_map(nvbo);
236 if (ret) {
237 NV_ERROR(dev, "failed to map fb: %d\n", ret);
238 nouveau_bo_unpin(nvbo);
239 nouveau_bo_ref(NULL, &nvbo);
240 goto out;
241 }
242
243 mutex_lock(&dev->struct_mutex);
244
245 fb = nouveau_framebuffer_create(dev, nvbo, &mode_cmd);
246 if (!fb) {
247 ret = -ENOMEM;
248 NV_ERROR(dev, "failed to allocate fb.\n");
249 goto out_unref;
250 }
251
252 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
253
254 nouveau_fb = nouveau_framebuffer(fb);
255 *pfb = fb;
256
257 info = framebuffer_alloc(sizeof(struct nouveau_fbcon_par), device);
258 if (!info) {
259 ret = -ENOMEM;
260 goto out_unref;
261 }
262
263 par = info->par;
264 par->helper.funcs = &nouveau_fbcon_helper_funcs;
265 par->helper.dev = dev;
266 ret = drm_fb_helper_init_crtc_count(&par->helper, 2, 4);
267 if (ret)
268 goto out_unref;
269 dev_priv->fbdev_info = info;
270
271 strcpy(info->fix.id, "nouveaufb");
272 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
273 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
274 info->fbops = &nouveau_fbcon_ops;
275 info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
276 dev_priv->vm_vram_base;
277 info->fix.smem_len = size;
278
279 info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
280 info->screen_size = size;
281
282 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
283 drm_fb_helper_fill_var(info, fb, fb_width, fb_height);
284
285 /* FIXME: we really shouldn't expose mmio space at all */
286 info->fix.mmio_start = pci_resource_start(dev->pdev, 1);
287 info->fix.mmio_len = pci_resource_len(dev->pdev, 1);
288
289 /* Set aperture base/size for vesafb takeover */
290#if defined(__i386__) || defined(__x86_64__)
291 if (nouveau_fbcon_has_vesafb_or_efifb(dev)) {
292		/* Some NVIDIA VBIOSes are stupid and decide to put the
293 * framebuffer in the middle of the PRAMIN BAR for
294 * whatever reason. We need to know the exact lfb_base
295 * to get vesafb kicked off, and the only reliable way
296 * we have left is to find out lfb_base the same way
297 * vesafb did.
298 */
299 info->aperture_base = screen_info.lfb_base;
300 info->aperture_size = screen_info.lfb_size;
301 if (screen_info.orig_video_isVGA == VIDEO_TYPE_VLFB)
302 info->aperture_size *= 65536;
303 } else
304#endif
305 {
306 info->aperture_base = info->fix.mmio_start;
307 info->aperture_size = info->fix.mmio_len;
308 }
309
310 info->pixmap.size = 64*1024;
311 info->pixmap.buf_align = 8;
312 info->pixmap.access_align = 32;
313 info->pixmap.flags = FB_PIXMAP_SYSTEM;
314 info->pixmap.scan_align = 1;
315
316 fb->fbdev = info;
317
318 par->nouveau_fb = nouveau_fb;
319 par->dev = dev;
320
321 switch (dev_priv->card_type) {
322 case NV_50:
323 nv50_fbcon_accel_init(info);
324 break;
325 default:
326 nv04_fbcon_accel_init(info);
327 break;
328	}
329
330 nouveau_fbcon_zfill(dev);
331
332	/* To allow resizing without swapping buffers */
333 NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
334 nouveau_fb->base.width,
335 nouveau_fb->base.height,
336 nvbo->bo.offset, nvbo);
337
338 mutex_unlock(&dev->struct_mutex);
339 return 0;
340
341out_unref:
342 mutex_unlock(&dev->struct_mutex);
343out:
344 return ret;
345}
346
347int
348nouveau_fbcon_probe(struct drm_device *dev)
349{
350 NV_DEBUG(dev, "\n");
351
352 return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
353}
354
355int
356nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
357{
358 struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fb);
359 struct fb_info *info;
360
361 if (!fb)
362 return -EINVAL;
363
364 info = fb->fbdev;
365 if (info) {
366 struct nouveau_fbcon_par *par = info->par;
367
368 unregister_framebuffer(info);
369 nouveau_bo_unmap(nouveau_fb->nvbo);
370 mutex_lock(&dev->struct_mutex);
371 drm_gem_object_unreference(nouveau_fb->nvbo->gem);
372 nouveau_fb->nvbo = NULL;
373 mutex_unlock(&dev->struct_mutex);
374 if (par)
375 drm_fb_helper_free(&par->helper);
376 framebuffer_release(info);
377 }
378
379 return 0;
380}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
new file mode 100644
index 000000000000..8531140fedbc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NOUVEAU_FBCON_H__
28#define __NOUVEAU_FBCON_H__
29
30#include "drm_fb_helper.h"
31
32struct nouveau_fbcon_par {
33 struct drm_fb_helper helper;
34 struct drm_device *dev;
35 struct nouveau_framebuffer *nouveau_fb;
36};
37
38int nouveau_fbcon_probe(struct drm_device *dev);
39int nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb);
40void nouveau_fbcon_restore(void);
41void nouveau_fbcon_zfill(struct drm_device *dev);
42
43int nv04_fbcon_accel_init(struct fb_info *info);
44int nv50_fbcon_accel_init(struct fb_info *info);
45
46#endif /* __NOUVEAU_FBCON_H__ */
47
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 000000000000..0cff7eb3690a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,262 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_dma.h"
32
33#define USE_REFCNT (dev_priv->card_type >= NV_10)
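/* NV10 and later maintain a per-channel reference counter the GPU
 * updates itself (read via nvchan_rd32(chan, 0x48) in
 * nouveau_fence_update() below); older chips fall back to the last
 * sequence number recorded by the IRQ handler. */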
34
35struct nouveau_fence {
36 struct nouveau_channel *channel;
37 struct kref refcount;
38 struct list_head entry;
39
40 uint32_t sequence;
41 bool signalled;
42};
43
44static inline struct nouveau_fence *
45nouveau_fence(void *sync_obj)
46{
47 return (struct nouveau_fence *)sync_obj;
48}
49
50static void
51nouveau_fence_del(struct kref *ref)
52{
53 struct nouveau_fence *fence =
54 container_of(ref, struct nouveau_fence, refcount);
55
56 kfree(fence);
57}
58
59void
60nouveau_fence_update(struct nouveau_channel *chan)
61{
62 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
63 struct list_head *entry, *tmp;
64 struct nouveau_fence *fence;
65 uint32_t sequence;
66
67 if (USE_REFCNT)
68 sequence = nvchan_rd32(chan, 0x48);
69 else
70 sequence = chan->fence.last_sequence_irq;
71
72 if (chan->fence.sequence_ack == sequence)
73 return;
74 chan->fence.sequence_ack = sequence;
75
76 list_for_each_safe(entry, tmp, &chan->fence.pending) {
77 fence = list_entry(entry, struct nouveau_fence, entry);
78
79 sequence = fence->sequence;
80 fence->signalled = true;
81 list_del(&fence->entry);
82 kref_put(&fence->refcount, nouveau_fence_del);
83
84 if (sequence == chan->fence.sequence_ack)
85 break;
86 }
87}
88
89int
90nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
91 bool emit)
92{
93 struct nouveau_fence *fence;
94 int ret = 0;
95
96 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
97 if (!fence)
98 return -ENOMEM;
99 kref_init(&fence->refcount);
100 fence->channel = chan;
101
102 if (emit)
103 ret = nouveau_fence_emit(fence);
104
105 if (ret)
106 nouveau_fence_unref((void *)&fence);
107 *pfence = fence;
108 return ret;
109}
110
111struct nouveau_channel *
112nouveau_fence_channel(struct nouveau_fence *fence)
113{
114 return fence ? fence->channel : NULL;
115}
116
117int
118nouveau_fence_emit(struct nouveau_fence *fence)
119{
120 struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
121 struct nouveau_channel *chan = fence->channel;
122 unsigned long flags;
123 int ret;
124
125 ret = RING_SPACE(chan, 2);
126 if (ret)
127 return ret;
128
129 if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
130 spin_lock_irqsave(&chan->fence.lock, flags);
131 nouveau_fence_update(chan);
132 spin_unlock_irqrestore(&chan->fence.lock, flags);
133
134 BUG_ON(chan->fence.sequence ==
135 chan->fence.sequence_ack - 1);
136 }
137
138 fence->sequence = ++chan->fence.sequence;
139
140 kref_get(&fence->refcount);
141 spin_lock_irqsave(&chan->fence.lock, flags);
142 list_add_tail(&fence->entry, &chan->fence.pending);
143 spin_unlock_irqrestore(&chan->fence.lock, flags);
144
145 BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
146 OUT_RING(chan, fence->sequence);
147 FIRE_RING(chan);
148
149 return 0;
150}
151
152void
153nouveau_fence_unref(void **sync_obj)
154{
155 struct nouveau_fence *fence = nouveau_fence(*sync_obj);
156
157 if (fence)
158 kref_put(&fence->refcount, nouveau_fence_del);
159 *sync_obj = NULL;
160}
161
162void *
163nouveau_fence_ref(void *sync_obj)
164{
165 struct nouveau_fence *fence = nouveau_fence(sync_obj);
166
167 kref_get(&fence->refcount);
168 return sync_obj;
169}
170
171bool
172nouveau_fence_signalled(void *sync_obj, void *sync_arg)
173{
174 struct nouveau_fence *fence = nouveau_fence(sync_obj);
175 struct nouveau_channel *chan = fence->channel;
176 unsigned long flags;
177
178 if (fence->signalled)
179 return true;
180
181 spin_lock_irqsave(&chan->fence.lock, flags);
182 nouveau_fence_update(chan);
183 spin_unlock_irqrestore(&chan->fence.lock, flags);
184 return fence->signalled;
185}
186
187int
188nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
189{
190 unsigned long timeout = jiffies + (3 * DRM_HZ);
191 int ret = 0;
192
193 __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
194
195 while (1) {
196 if (nouveau_fence_signalled(sync_obj, sync_arg))
197 break;
198
199 if (time_after_eq(jiffies, timeout)) {
200 ret = -EBUSY;
201 break;
202 }
203
204 if (lazy)
205 schedule_timeout(1);
206
207 if (intr && signal_pending(current)) {
208 ret = -ERESTART;
209 break;
210 }
211 }
212
213 __set_current_state(TASK_RUNNING);
214
215 return ret;
216}
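Taken together, the functions above give the usual fence lifecycle; a minimal usage sketch (error handling elided, `chan` assumed valid):

	struct nouveau_fence *fence;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);	/* create + emit */
	if (ret == 0) {
		ret = nouveau_fence_wait(fence, NULL, true, false);
		nouveau_fence_unref((void *)&fence);
	}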
217
218int
219nouveau_fence_flush(void *sync_obj, void *sync_arg)
220{
221 return 0;
222}
223
224void
225nouveau_fence_handler(struct drm_device *dev, int channel)
226{
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_channel *chan = NULL;
229
230 if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
231 chan = dev_priv->fifos[channel];
232
233 if (chan) {
234 spin_lock_irq(&chan->fence.lock);
235 nouveau_fence_update(chan);
236 spin_unlock_irq(&chan->fence.lock);
237 }
238}
239
240int
241nouveau_fence_init(struct nouveau_channel *chan)
242{
243 INIT_LIST_HEAD(&chan->fence.pending);
244 spin_lock_init(&chan->fence.lock);
245 return 0;
246}
247
248void
249nouveau_fence_fini(struct nouveau_channel *chan)
250{
251 struct list_head *entry, *tmp;
252 struct nouveau_fence *fence;
253
254 list_for_each_safe(entry, tmp, &chan->fence.pending) {
255 fence = list_entry(entry, struct nouveau_fence, entry);
256
257 fence->signalled = true;
258 list_del(&fence->entry);
259 kref_put(&fence->refcount, nouveau_fence_del);
260 }
261}
262
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
new file mode 100644
index 000000000000..11f831f0ddc5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -0,0 +1,992 @@
1/*
2 * Copyright (C) 2008 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26#include "drmP.h"
27#include "drm.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_drm.h"
31#include "nouveau_dma.h"
32
33#define nouveau_gem_pushbuf_sync(chan) 0
34
35int
36nouveau_gem_object_new(struct drm_gem_object *gem)
37{
38 return 0;
39}
40
41void
42nouveau_gem_object_del(struct drm_gem_object *gem)
43{
44	struct nouveau_bo *nvbo = gem->driver_private;
45	struct ttm_buffer_object *bo;
46	if (!nvbo)
47		return;
48	bo = &nvbo->bo;
49	nvbo->gem = NULL;
50
51 if (unlikely(nvbo->cpu_filp))
52 ttm_bo_synccpu_write_release(bo);
53
54 if (unlikely(nvbo->pin_refcnt)) {
55 nvbo->pin_refcnt = 1;
56 nouveau_bo_unpin(nvbo);
57 }
58
59 ttm_bo_unref(&bo);
60}
61
62int
63nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
64 int size, int align, uint32_t flags, uint32_t tile_mode,
65 uint32_t tile_flags, bool no_vm, bool mappable,
66 struct nouveau_bo **pnvbo)
67{
68 struct nouveau_bo *nvbo;
69 int ret;
70
71 ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
72 tile_flags, no_vm, mappable, pnvbo);
73 if (ret)
74 return ret;
75 nvbo = *pnvbo;
76
77 nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
78 if (!nvbo->gem) {
79 nouveau_bo_ref(NULL, pnvbo);
80 return -ENOMEM;
81 }
82
83 nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
84 nvbo->gem->driver_private = nvbo;
85 return 0;
86}
87
88static int
89nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
90{
91 struct nouveau_bo *nvbo = nouveau_gem_object(gem);
92
93 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
94 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
95 else
96 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
97
98 rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
99 rep->offset = nvbo->bo.offset;
100 rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
101 rep->tile_mode = nvbo->tile_mode;
102 rep->tile_flags = nvbo->tile_flags;
103 return 0;
104}
105
106static bool
107nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
108 switch (tile_flags) {
109 case 0x0000:
110 case 0x1800:
111 case 0x2800:
112 case 0x4800:
113 case 0x7000:
114 case 0x7400:
115 case 0x7a00:
116 case 0xe000:
117 break;
118 default:
119 NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
120 return false;
121 }
122
123 return true;
124}
125
126int
127nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
128 struct drm_file *file_priv)
129{
130 struct drm_nouveau_private *dev_priv = dev->dev_private;
131 struct drm_nouveau_gem_new *req = data;
132 struct nouveau_bo *nvbo = NULL;
133 struct nouveau_channel *chan = NULL;
134 uint32_t flags = 0;
135 int ret = 0;
136
137 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
138
139 if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
140 dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
141
142 if (req->channel_hint) {
143 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
144 file_priv, chan);
145 }
146
147 if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
148 flags |= TTM_PL_FLAG_VRAM;
149 if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
150 flags |= TTM_PL_FLAG_TT;
151 if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
152 flags |= TTM_PL_FLAG_SYSTEM;
153
154 if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
155 return -EINVAL;
156
157 ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
158 req->info.tile_mode, req->info.tile_flags, false,
159 (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
160 &nvbo);
161 if (ret)
162 return ret;
163
164 ret = nouveau_gem_info(nvbo->gem, &req->info);
165 if (ret)
166 goto out;
167
168 ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
169out:
170 mutex_lock(&dev->struct_mutex);
171 drm_gem_object_handle_unreference(nvbo->gem);
172 mutex_unlock(&dev->struct_mutex);
173
174 if (ret)
175 drm_gem_object_unreference(nvbo->gem);
176 return ret;
177}
178
179static int
180nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
181 uint32_t write_domains, uint32_t valid_domains)
182{
183 struct nouveau_bo *nvbo = gem->driver_private;
184 struct ttm_buffer_object *bo = &nvbo->bo;
185 uint64_t flags;
186
187 if (!valid_domains || (!read_domains && !write_domains))
188 return -EINVAL;
189
190 if (write_domains) {
191 if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
192 (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
193 flags = TTM_PL_FLAG_VRAM;
194 else
195 if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
196 (write_domains & NOUVEAU_GEM_DOMAIN_GART))
197 flags = TTM_PL_FLAG_TT;
198 else
199 return -EINVAL;
200 } else {
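		/* Read-only: prefer whichever placement the BO already
		 * occupies, so validation needn't migrate it. */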
201 if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
202 (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
203 bo->mem.mem_type == TTM_PL_VRAM)
204 flags = TTM_PL_FLAG_VRAM;
205 else
206 if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
207 (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
208 bo->mem.mem_type == TTM_PL_TT)
209 flags = TTM_PL_FLAG_TT;
210 else
211 if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
212 (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
213 flags = TTM_PL_FLAG_VRAM;
214 else
215 flags = TTM_PL_FLAG_TT;
216 }
217
218 nouveau_bo_placement_set(nvbo, flags);
219 return 0;
220}
221
222struct validate_op {
223 struct nouveau_fence *fence;
224 struct list_head vram_list;
225 struct list_head gart_list;
226 struct list_head both_list;
227};
228
229static void
230validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
231{
232 struct list_head *entry, *tmp;
233 struct nouveau_bo *nvbo;
234
235 list_for_each_safe(entry, tmp, list) {
236 nvbo = list_entry(entry, struct nouveau_bo, entry);
237 if (likely(fence)) {
238 struct nouveau_fence *prev_fence;
239
240 spin_lock(&nvbo->bo.lock);
241 prev_fence = nvbo->bo.sync_obj;
242 nvbo->bo.sync_obj = nouveau_fence_ref(fence);
243 spin_unlock(&nvbo->bo.lock);
244 nouveau_fence_unref((void *)&prev_fence);
245 }
246
247 list_del(&nvbo->entry);
248 nvbo->reserved_by = NULL;
249 ttm_bo_unreserve(&nvbo->bo);
250 drm_gem_object_unreference(nvbo->gem);
251 }
252}
253
254static void
255validate_fini(struct validate_op *op, bool success)
256{
257 struct nouveau_fence *fence = op->fence;
258
259 if (unlikely(!success))
260 op->fence = NULL;
261
262 validate_fini_list(&op->vram_list, op->fence);
263 validate_fini_list(&op->gart_list, op->fence);
264 validate_fini_list(&op->both_list, op->fence);
265 nouveau_fence_unref((void *)&fence);
266}
267
268static int
269validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
270 struct drm_nouveau_gem_pushbuf_bo *pbbo,
271 int nr_buffers, struct validate_op *op)
272{
273 struct drm_device *dev = chan->dev;
274 struct drm_nouveau_private *dev_priv = dev->dev_private;
275 uint32_t sequence;
276 int trycnt = 0;
277 int ret, i;
278
279 sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
280retry:
281 if (++trycnt > 100000) {
282 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
283 return -EINVAL;
284 }
285
286 for (i = 0; i < nr_buffers; i++) {
287 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
288 struct drm_gem_object *gem;
289 struct nouveau_bo *nvbo;
290
291 gem = drm_gem_object_lookup(dev, file_priv, b->handle);
292 if (!gem) {
293 NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
294 validate_fini(op, NULL);
295 return -EINVAL;
296 }
297 nvbo = gem->driver_private;
298
299 if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
300 NV_ERROR(dev, "multiple instances of buffer %d on "
301 "validation list\n", b->handle);
302 validate_fini(op, NULL);
303 return -EINVAL;
304 }
305
306 ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
307 if (ret) {
308 validate_fini(op, NULL);
309 if (ret == -EAGAIN)
310 ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
311 drm_gem_object_unreference(gem);
312 if (ret)
313 return ret;
314 goto retry;
315 }
316
317 nvbo->reserved_by = file_priv;
318 nvbo->pbbo_index = i;
319 if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
320 (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
321 list_add_tail(&nvbo->entry, &op->both_list);
322 else
323 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
324 list_add_tail(&nvbo->entry, &op->vram_list);
325 else
326 if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
327 list_add_tail(&nvbo->entry, &op->gart_list);
328 else {
329 NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
330 b->valid_domains);
331 validate_fini(op, NULL);
332 return -EINVAL;
333 }
334
335 if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
336 validate_fini(op, NULL);
337
338 if (nvbo->cpu_filp == file_priv) {
339 NV_ERROR(dev, "bo %p mapped by process trying "
340 "to validate it!\n", nvbo);
341 return -EINVAL;
342 }
343
344 ret = ttm_bo_wait_cpu(&nvbo->bo, false);
345 if (ret == -ERESTART)
346 ret = -EAGAIN;
347 if (ret)
348 return ret;
349 goto retry;
350 }
351 }
352
353 return 0;
354}
355
356static int
357validate_list(struct nouveau_channel *chan, struct list_head *list,
358 struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
359{
360 struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
361 (void __force __user *)(uintptr_t)user_pbbo_ptr;
362 struct nouveau_bo *nvbo;
363 int ret, relocs = 0;
364
365 list_for_each_entry(nvbo, list, entry) {
366 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
367 struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
368
369 if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
370 spin_lock(&nvbo->bo.lock);
371 ret = ttm_bo_wait(&nvbo->bo, false, false, false);
372 spin_unlock(&nvbo->bo.lock);
373 if (unlikely(ret))
374 return ret;
375 }
376
377 ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
378 b->write_domains,
379 b->valid_domains);
380 if (unlikely(ret))
381 return ret;
382
383 nvbo->channel = chan;
384 ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
385 false, false);
386 nvbo->channel = NULL;
387 if (unlikely(ret))
388 return ret;
389
390 if (nvbo->bo.offset == b->presumed_offset &&
391 ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
392 b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
393 (nvbo->bo.mem.mem_type == TTM_PL_TT &&
394 b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
395 continue;
396
397 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
398 b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
399 else
400 b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
401 b->presumed_offset = nvbo->bo.offset;
402 b->presumed_ok = 0;
403 relocs++;
404
405 if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
406 return -EFAULT;
407 }
408
409 return relocs;
410}
411
412static int
413nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
414 struct drm_file *file_priv,
415 struct drm_nouveau_gem_pushbuf_bo *pbbo,
416 uint64_t user_buffers, int nr_buffers,
417 struct validate_op *op, int *apply_relocs)
418{
419 int ret, relocs = 0;
420
421 INIT_LIST_HEAD(&op->vram_list);
422 INIT_LIST_HEAD(&op->gart_list);
423 INIT_LIST_HEAD(&op->both_list);
424
425 ret = nouveau_fence_new(chan, &op->fence, false);
426 if (ret)
427 return ret;
428
429 if (nr_buffers == 0)
430 return 0;
431
432 ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
433 if (unlikely(ret))
434 return ret;
435
436 ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
437 if (unlikely(ret < 0)) {
438 validate_fini(op, NULL);
439 return ret;
440 }
441 relocs += ret;
442
443 ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
444 if (unlikely(ret < 0)) {
445 validate_fini(op, NULL);
446 return ret;
447 }
448 relocs += ret;
449
450 ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
451 if (unlikely(ret < 0)) {
452 validate_fini(op, NULL);
453 return ret;
454 }
455 relocs += ret;
456
457 *apply_relocs = relocs;
458 return 0;
459}
460
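/* Copy an nmemb-element user-space array into a freshly allocated
 * kernel buffer.  Callers bound nmemb against the NOUVEAU_GEM_MAX_*
 * limits beforehand, keeping the nmemb * size product from
 * overflowing. */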
461static inline void *
462u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
463{
464 void *mem;
465 void __user *userptr = (void __force __user *)(uintptr_t)user;
466
467 mem = kmalloc(nmemb * size, GFP_KERNEL);
468 if (!mem)
469 return ERR_PTR(-ENOMEM);
470
471 if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
472 kfree(mem);
473 return ERR_PTR(-EFAULT);
474 }
475
476 return mem;
477}
478
479static int
480nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
481 struct drm_nouveau_gem_pushbuf_bo *bo,
482 int nr_relocs, uint64_t ptr_relocs,
483 int nr_dwords, int first_dword,
484 uint32_t *pushbuf, bool is_iomem)
485{
486 struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
487 struct drm_device *dev = chan->dev;
488 int ret = 0, i;
489
490 reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
491 if (IS_ERR(reloc))
492 return PTR_ERR(reloc);
493
494 for (i = 0; i < nr_relocs; i++) {
495 struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
496 struct drm_nouveau_gem_pushbuf_bo *b;
497 uint32_t data;
498
499 if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
500 r->reloc_index >= first_dword + nr_dwords) {
501 NV_ERROR(dev, "Bad relocation %d\n", i);
502 NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
503 NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
504 ret = -EINVAL;
505 break;
506 }
507
508 b = &bo[r->bo_index];
509 if (b->presumed_ok)
510 continue;
511
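		/* LOW/HIGH select which 32-bit half of the relocated
		 * address (presumed_offset + data) gets patched in; OR
		 * additionally folds in a placement-dependent value --
		 * tor when the buffer sits in GART, vor in VRAM. */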
512 if (r->flags & NOUVEAU_GEM_RELOC_LOW)
513 data = b->presumed_offset + r->data;
514 else
515 if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
516 data = (b->presumed_offset + r->data) >> 32;
517 else
518 data = r->data;
519
520 if (r->flags & NOUVEAU_GEM_RELOC_OR) {
521 if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
522 data |= r->tor;
523 else
524 data |= r->vor;
525 }
526
527 if (is_iomem)
528 iowrite32_native(data, (void __force __iomem *)
529 &pushbuf[r->reloc_index]);
530 else
531 pushbuf[r->reloc_index] = data;
532 }
533
534 kfree(reloc);
535 return ret;
536}
537
538int
539nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
540 struct drm_file *file_priv)
541{
542 struct drm_nouveau_gem_pushbuf *req = data;
543 struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
544 struct nouveau_channel *chan;
545 struct validate_op op;
546 uint32_t *pushbuf = NULL;
547 int ret = 0, do_reloc = 0, i;
548
549 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
550 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
551
552 if (req->nr_dwords >= chan->dma.max ||
553 req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
554 req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
555 NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
556 NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
557 chan->dma.max - 1);
558 NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
559 NOUVEAU_GEM_MAX_BUFFERS);
560 NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
561 NOUVEAU_GEM_MAX_RELOCS);
562 return -EINVAL;
563 }
564
565 pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
566 if (IS_ERR(pushbuf))
567 return PTR_ERR(pushbuf);
568
569 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
570 if (IS_ERR(bo)) {
571 kfree(pushbuf);
572 return PTR_ERR(bo);
573 }
574
575 mutex_lock(&dev->struct_mutex);
576
577 /* Validate buffer list */
578 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
579 req->nr_buffers, &op, &do_reloc);
580 if (ret)
581 goto out;
582
583 /* Apply any relocations that are required */
584 if (do_reloc) {
585 ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
586 bo, req->nr_relocs,
587 req->relocs,
588 req->nr_dwords, 0,
589 pushbuf, false);
590 if (ret)
591 goto out;
592 }
593
594	/* Emit push buffer to the hw */
595
596 ret = RING_SPACE(chan, req->nr_dwords);
597 if (ret)
598 goto out;
599
600 OUT_RINGp(chan, pushbuf, req->nr_dwords);
601
602 ret = nouveau_fence_emit(op.fence);
603 if (ret) {
604 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
605 WIND_RING(chan);
606 goto out;
607 }
608
609 if (nouveau_gem_pushbuf_sync(chan)) {
610 ret = nouveau_fence_wait(op.fence, NULL, false, false);
611 if (ret) {
612 for (i = 0; i < req->nr_dwords; i++)
613 NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
614				NV_ERROR(dev, "^^ above push buffer failed to execute\n");
615 }
616 }
617
618out:
619 validate_fini(&op, ret == 0);
620 mutex_unlock(&dev->struct_mutex);
621 kfree(pushbuf);
622 kfree(bo);
623 return ret;
624}
625
626#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
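/* NV20 and up can CALL into a separate push buffer and RETURN from
 * it; earlier chips must JMP into it and patch a JMP back into the
 * master ring -- the "presumed return address" fixed up below. */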
627
628int
629nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
630 struct drm_file *file_priv)
631{
632 struct drm_nouveau_private *dev_priv = dev->dev_private;
633 struct drm_nouveau_gem_pushbuf_call *req = data;
634 struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
635 struct nouveau_channel *chan;
636 struct drm_gem_object *gem;
637 struct nouveau_bo *pbbo;
638 struct validate_op op;
639 int i, ret = 0, do_reloc = 0;
640
641 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
642 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
643
644 if (unlikely(req->handle == 0))
645 goto out_next;
646
647 if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
648 req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
649 NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
650 NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
651 NOUVEAU_GEM_MAX_BUFFERS);
652 NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
653 NOUVEAU_GEM_MAX_RELOCS);
654 return -EINVAL;
655 }
656
657 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
658 if (IS_ERR(bo))
659 return PTR_ERR(bo);
660
661 mutex_lock(&dev->struct_mutex);
662
663 /* Validate buffer list */
664 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
665 req->nr_buffers, &op, &do_reloc);
666 if (ret) {
667 NV_ERROR(dev, "validate: %d\n", ret);
668 goto out;
669 }
670
671 /* Validate DMA push buffer */
672 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
673 if (!gem) {
674 NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
675 ret = -EINVAL;
676 goto out;
677 }
678 pbbo = nouveau_gem_object(gem);
679
680 ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
681 chan->fence.sequence);
682 if (ret) {
683 NV_ERROR(dev, "resv pb: %d\n", ret);
684 drm_gem_object_unreference(gem);
685 goto out;
686 }
687
688 nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
689 ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
690 if (ret) {
691 NV_ERROR(dev, "validate pb: %d\n", ret);
692 ttm_bo_unreserve(&pbbo->bo);
693 drm_gem_object_unreference(gem);
694 goto out;
695 }
696
697 list_add_tail(&pbbo->entry, &op.both_list);
698
699	/* If the presumed return address doesn't match, we need to map the
700	 * push buffer and fix it.
701	 */
702 if (!PUSHBUF_CAL) {
703 uint32_t retaddy;
704
705 if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
706 ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
707 if (ret) {
708 NV_ERROR(dev, "jmp_space: %d\n", ret);
709 goto out;
710 }
711 }
712
713 retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
714 retaddy |= 0x20000000;
715 if (retaddy != req->suffix0) {
716 req->suffix0 = retaddy;
717 do_reloc = 1;
718 }
719 }
720
721 /* Apply any relocations that are required */
722 if (do_reloc) {
723 void *pbvirt;
724 bool is_iomem;
725 ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
726 &pbbo->kmap);
727 if (ret) {
728 NV_ERROR(dev, "kmap pb: %d\n", ret);
729 goto out;
730 }
731
732 pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
733 ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
734 req->nr_relocs,
735 req->relocs,
736 req->nr_dwords,
737 req->offset / 4,
738 pbvirt, is_iomem);
739
740 if (!PUSHBUF_CAL) {
741 nouveau_bo_wr32(pbbo,
742 req->offset / 4 + req->nr_dwords - 2,
743 req->suffix0);
744 }
745
746 ttm_bo_kunmap(&pbbo->kmap);
747 if (ret) {
748 NV_ERROR(dev, "reloc apply: %d\n", ret);
749 goto out;
750 }
751 }
752
753 if (PUSHBUF_CAL) {
754 ret = RING_SPACE(chan, 2);
755 if (ret) {
756 NV_ERROR(dev, "cal_space: %d\n", ret);
757 goto out;
758 }
759 OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
760 req->offset) | 2);
761 OUT_RING(chan, 0);
762 } else {
763 ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
764 if (ret) {
765 NV_ERROR(dev, "jmp_space: %d\n", ret);
766 goto out;
767 }
768 OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
769 req->offset) | 0x20000000);
770 OUT_RING(chan, 0);
771
772 /* Space the jumps apart with NOPs. */
773 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
774 OUT_RING(chan, 0);
775 }
776
777 ret = nouveau_fence_emit(op.fence);
778 if (ret) {
779 NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
780 WIND_RING(chan);
781 goto out;
782 }
783
784out:
785 validate_fini(&op, ret == 0);
786 mutex_unlock(&dev->struct_mutex);
787 kfree(bo);
788
789out_next:
790 if (PUSHBUF_CAL) {
791 req->suffix0 = 0x00020000;
792 req->suffix1 = 0x00000000;
793 } else {
794 req->suffix0 = 0x20000000 |
795 (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
796 req->suffix1 = 0x00000000;
797 }
798
799 return ret;
800}
801
802int
803nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
804 struct drm_file *file_priv)
805{
806 struct drm_nouveau_private *dev_priv = dev->dev_private;
807 struct drm_nouveau_gem_pushbuf_call *req = data;
808
809 req->vram_available = dev_priv->fb_aper_free;
810 req->gart_available = dev_priv->gart_info.aper_free;
811
812 return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
813}
814
815static inline uint32_t
816domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
817{
818 uint32_t flags = 0;
819
820 if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
821 flags |= TTM_PL_FLAG_VRAM;
822 if (domain & NOUVEAU_GEM_DOMAIN_GART)
823 flags |= TTM_PL_FLAG_TT;
824
825 return flags;
826}
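
/* e.g. domain_to_ttm(nvbo, NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART)
 * returns TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT, per the mapping above.
 */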
827
828int
829nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
830 struct drm_file *file_priv)
831{
832 struct drm_nouveau_gem_pin *req = data;
833 struct drm_gem_object *gem;
834 struct nouveau_bo *nvbo;
835 int ret = 0;
836
837 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
838
839 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
840 NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
841 return -EINVAL;
842 }
843
844 if (!DRM_SUSER(DRM_CURPROC))
845 return -EPERM;
846
847 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
848 if (!gem)
849 return -EINVAL;
850 nvbo = nouveau_gem_object(gem);
851
852 ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
853 if (ret)
854 goto out;
855
856 req->offset = nvbo->bo.offset;
857 if (nvbo->bo.mem.mem_type == TTM_PL_TT)
858 req->domain = NOUVEAU_GEM_DOMAIN_GART;
859 else
860 req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
861
862out:
863 mutex_lock(&dev->struct_mutex);
864 drm_gem_object_unreference(gem);
865 mutex_unlock(&dev->struct_mutex);
866
867 return ret;
868}
869
870int
871nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
872 struct drm_file *file_priv)
873{
874 struct drm_nouveau_gem_pin *req = data;
875 struct drm_gem_object *gem;
876 int ret;
877
878 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
879
880 if (drm_core_check_feature(dev, DRIVER_MODESET))
881 return -EINVAL;
882
883 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
884 if (!gem)
885 return -EINVAL;
886
887 ret = nouveau_bo_unpin(nouveau_gem_object(gem));
888
889 mutex_lock(&dev->struct_mutex);
890 drm_gem_object_unreference(gem);
891 mutex_unlock(&dev->struct_mutex);
892
893 return ret;
894}
895
896int
897nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
898 struct drm_file *file_priv)
899{
900 struct drm_nouveau_gem_cpu_prep *req = data;
901 struct drm_gem_object *gem;
902 struct nouveau_bo *nvbo;
903 bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
904 int ret = -EINVAL;
905
906 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
907
908 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
909 if (!gem)
910 return ret;
911 nvbo = nouveau_gem_object(gem);
912
913 if (nvbo->cpu_filp) {
914 if (nvbo->cpu_filp == file_priv)
915 goto out;
916
917 ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
918 if (ret == -ERESTART)
919 ret = -EAGAIN;
920 if (ret)
921 goto out;
922 }
923
924 if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
925 ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
926 } else {
927 ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
928 if (ret == -ERESTART)
929 ret = -EAGAIN;
930 else
931 if (ret == 0)
932 nvbo->cpu_filp = file_priv;
933 }
934
935out:
936 mutex_lock(&dev->struct_mutex);
937 drm_gem_object_unreference(gem);
938 mutex_unlock(&dev->struct_mutex);
939 return ret;
940}
941
942int
943nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
944 struct drm_file *file_priv)
945{
946 struct drm_nouveau_gem_cpu_prep *req = data;
947 struct drm_gem_object *gem;
948 struct nouveau_bo *nvbo;
949 int ret = -EINVAL;
950
951 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
952
953 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
954 if (!gem)
955 return ret;
956 nvbo = nouveau_gem_object(gem);
957
958 if (nvbo->cpu_filp != file_priv)
959 goto out;
960 nvbo->cpu_filp = NULL;
961
962 ttm_bo_synccpu_write_release(&nvbo->bo);
963 ret = 0;
964
965out:
966 mutex_lock(&dev->struct_mutex);
967 drm_gem_object_unreference(gem);
968 mutex_unlock(&dev->struct_mutex);
969 return ret;
970}
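
/* Editor's note: userspace is expected to bracket direct CPU access to a
 * buffer with the two ioctls above; a hedged sketch, with the command
 * indices assumed from nouveau_drm.h:
 *
 *	struct drm_nouveau_gem_cpu_prep req = { .handle = bo_handle };
 *	drmCommandWrite(fd, DRM_NOUVEAU_GEM_CPU_PREP, &req, sizeof(req));
 *	... read/write the mmap'ed buffer ...
 *	drmCommandWrite(fd, DRM_NOUVEAU_GEM_CPU_FINI, &req, sizeof(req));
 */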
971
972int
973nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
974 struct drm_file *file_priv)
975{
976 struct drm_nouveau_gem_info *req = data;
977 struct drm_gem_object *gem;
978 int ret;
979
980 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
981
982 gem = drm_gem_object_lookup(dev, file_priv, req->handle);
983 if (!gem)
984 return -EINVAL;
985
986 ret = nouveau_gem_info(gem, req);
987 mutex_lock(&dev->struct_mutex);
988 drm_gem_object_unreference(gem);
989 mutex_unlock(&dev->struct_mutex);
990 return ret;
991}
992
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
new file mode 100644
index 000000000000..dc46792a5c96
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -0,0 +1,1080 @@
1/*
2 * Copyright 2006 Dave Airlie
3 * Copyright 2007 Maarten Maathuis
4 * Copyright 2007-2009 Stuart Bennett
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
21 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_hw.h"
28
29#define CHIPSET_NFORCE 0x01a0
30#define CHIPSET_NFORCE2 0x01f0
31
32/*
33 * misc hw access wrappers/control functions
34 */
35
36void
37NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
38{
39 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
40 NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
41}
42
43uint8_t
44NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
45{
46 NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
47 return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
48}
49
50void
51NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
52{
53 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
54 NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
55}
56
57uint8_t
58NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
59{
60 NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
61 return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
62}
63
64 /* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied). It
65 * affects only the 8-bit VGA I/O regs, which we access via MMIO at
66 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*.
67 * In general the set value of CR44 does not matter: register access
68 * works as expected, and values can be set for the appropriate head by
69 * applying a 0x2000 offset as required. However:
70 * a) pre-NV40, the head B range of PRMVIO regs at 0xc23c* was not
71 * exposed, and CR44 must be set to 0 or 3 to access the correct head
72 * through the common 0xc03c* addresses;
73 * b) in tied mode (4), head B is programmed with the values set on head
74 * A, and access through the head B addresses can give strange results,
75 * so we leave tied mode in init once we know what CR44 should be
76 * restored to on exit.
77 *
78 * The owner parameter is slightly abused: 0 and 1 are treated as head
79 * values, so the value actually set is (owner * 3); other values are
80 * written literally.
81 */
82void
83NVSetOwner(struct drm_device *dev, int owner)
84{
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86
87 if (owner == 1)
88 owner *= 3;
89
90 if (dev_priv->chipset == 0x11) {
91 /* This might seem stupid, but the blob does it and
92 * omitting it often locks the system up.
93 */
94 NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
95 NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
96 }
97
98 /* CR44 is always changed on CRTC0 */
99 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
100
101 if (dev_priv->chipset == 0x11) { /* set me harder */
102 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
103 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
104 }
105}
106
107void
108NVBlankScreen(struct drm_device *dev, int head, bool blank)
109{
110 unsigned char seq1;
111
112 if (nv_two_heads(dev))
113 NVSetOwner(dev, head);
114
115 seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
116
117 NVVgaSeqReset(dev, head, true);
118 if (blank)
119 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
120 else
121 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
122 NVVgaSeqReset(dev, head, false);
123}
124
125/*
126 * PLL setting
127 */
128
129static int
130powerctrl_1_shift(int chip_version, int reg)
131{
132 int shift = -4;
133
134 if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
135 return shift;
136
137 switch (reg) {
138	case NV_RAMDAC_VPLL2:
139		shift += 4; /* fall through */
140	case NV_PRAMDAC_VPLL_COEFF:
141		shift += 4; /* fall through */
142	case NV_PRAMDAC_MPLL_COEFF:
143		shift += 4; /* fall through */
144	case NV_PRAMDAC_NVPLL_COEFF:
145		shift += 4;
146 }
147
148 /*
149 * the shift for vpll regs is only used for nv3x chips with a single
150 * stage pll
151 */
152 if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
153 chip_version == 0x36 || chip_version >= 0x40))
154 shift = -4;
155
156 return shift;
157}
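
/* Editor's note: the fallthroughs above accumulate, so the effective
 * shifts into NV_PBUS_POWERCTRL_1 are 0 (NVPLL), 4 (MPLL), 8 (VPLL1)
 * and 12 (VPLL2).
 */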
158
159static void
160setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
161{
162 struct drm_nouveau_private *dev_priv = dev->dev_private;
163 int chip_version = dev_priv->vbios->chip_version;
164 uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
165 int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
166 uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
167 uint32_t saved_powerctrl_1 = 0;
168 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);
169
170 if (oldpll == pll)
171 return; /* already set */
172
173 if (shift_powerctrl_1 >= 0) {
174 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
175 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
176 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
177 1 << shift_powerctrl_1);
178 }
179
180 if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
181 /* upclock -- write new post divider first */
182 NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
183 else
184 /* downclock -- write new NM first */
185 NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);
186
187 if (chip_version < 0x17 && chip_version != 0x11)
188 /* wait a bit on older chips */
189 msleep(64);
190 NVReadRAMDAC(dev, 0, reg);
191
192 /* then write the other half as well */
193 NVWriteRAMDAC(dev, 0, reg, pll);
194
195 if (shift_powerctrl_1 >= 0)
196 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
197}
198
199static uint32_t
200new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
201{
202 bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);
203
204 if (ss) /* single stage pll mode */
205 ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
206 NV_RAMDAC_580_VPLL2_ACTIVE;
207 else
208 ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
209 ~NV_RAMDAC_580_VPLL2_ACTIVE;
210
211 return ramdac580;
212}
213
214static void
215setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
216 struct nouveau_pll_vals *pv)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 int chip_version = dev_priv->vbios->chip_version;
220 bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
221 uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
222 uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
223 uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
224 uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
225 uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
226 uint32_t oldramdac580 = 0, ramdac580 = 0;
227 bool single_stage = !pv->NM2 || pv->N2 == pv->M2; /* nv41+ only */
228 uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
229 int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);
230
231 /* model specific additions to generic pll1 and pll2 set up above */
232 if (nv3035) {
233 pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
234 (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
235 pll2 = 0;
236 }
237 if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
238 oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
239 ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
240 if (oldramdac580 != ramdac580)
241 oldpll1 = ~0; /* force mismatch */
242 if (single_stage)
243 /* magic value used by nvidia in single stage mode */
244 pll2 |= 0x011f;
245 }
246 if (chip_version > 0x70)
247 /* magic bits set by the blob (but not the bios) on g71-73 */
248 pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;
249
250 if (oldpll1 == pll1 && oldpll2 == pll2)
251 return; /* already set */
252
253 if (shift_powerctrl_1 >= 0) {
254 saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
255 nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
256 (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
257 1 << shift_powerctrl_1);
258 }
259
260 if (chip_version >= 0x40) {
261 int shift_c040 = 14;
262
263 switch (reg1) {
264		case NV_PRAMDAC_MPLL_COEFF:
265			shift_c040 += 2; /* fall through */
266		case NV_PRAMDAC_NVPLL_COEFF:
267			shift_c040 += 2; /* fall through */
268		case NV_RAMDAC_VPLL2:
269			shift_c040 += 2; /* fall through */
270		case NV_PRAMDAC_VPLL_COEFF:
271			shift_c040 += 2;
272 }
273
274 savedc040 = nvReadMC(dev, 0xc040);
275 if (shift_c040 != 14)
276 nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
277 }
278
279 if (oldramdac580 != ramdac580)
280 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);
281
282 if (!nv3035)
283 NVWriteRAMDAC(dev, 0, reg2, pll2);
284 NVWriteRAMDAC(dev, 0, reg1, pll1);
285
286 if (shift_powerctrl_1 >= 0)
287 nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
288 if (chip_version >= 0x40)
289 nvWriteMC(dev, 0xc040, savedc040);
290}
291
292static void
293setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
294 struct nouveau_pll_vals *pv)
295{
296 /* When setting PLLs, there is a merry game of disabling and enabling
297 * various bits of hardware during the process. This function is a
298 * synthesis of six nv4x traces, nearly each card doing a subtly
299 * different thing. With luck all the necessary bits for each card are
300 * combined herein. Without luck it deviates from each card's formula
301 * so as to not work on any :)
302 */
303
304 uint32_t Preg = NMNMreg - 4;
305 bool mpll = Preg == 0x4020;
306 uint32_t oldPval = nvReadMC(dev, Preg);
307 uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
308 uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
309 0xc << 28 | pv->log2P << 16;
310 uint32_t saved4600 = 0;
311 /* some cards have different maskc040s */
312 uint32_t maskc040 = ~(3 << 14), savedc040;
313 bool single_stage = !pv->NM2 || pv->N2 == pv->M2;
314
315 if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
316 return;
317
318 if (Preg == 0x4000)
319 maskc040 = ~0x333;
320 if (Preg == 0x4058)
321 maskc040 = ~(0xc << 24);
322
323 if (mpll) {
324 struct pll_lims pll_lim;
325 uint8_t Pval2;
326
327 if (get_pll_limits(dev, Preg, &pll_lim))
328 return;
329
330 Pval2 = pv->log2P + pll_lim.log2p_bias;
331 if (Pval2 > pll_lim.max_log2p)
332 Pval2 = pll_lim.max_log2p;
333 Pval |= 1 << 28 | Pval2 << 20;
334
335 saved4600 = nvReadMC(dev, 0x4600);
336 nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
337 }
338 if (single_stage)
339 Pval |= mpll ? 1 << 12 : 1 << 8;
340
341 nvWriteMC(dev, Preg, oldPval | 1 << 28);
342 nvWriteMC(dev, Preg, Pval & ~(4 << 28));
343 if (mpll) {
344 Pval |= 8 << 20;
345 nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
346 nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
347 }
348
349 savedc040 = nvReadMC(dev, 0xc040);
350 nvWriteMC(dev, 0xc040, savedc040 & maskc040);
351
352 nvWriteMC(dev, NMNMreg, NMNM);
353 if (NMNMreg == 0x4024)
354 nvWriteMC(dev, 0x403c, NMNM);
355
356 nvWriteMC(dev, Preg, Pval);
357 if (mpll) {
358 Pval &= ~(8 << 20);
359 nvWriteMC(dev, 0x4020, Pval);
360 nvWriteMC(dev, 0x4038, Pval);
361 nvWriteMC(dev, 0x4600, saved4600);
362 }
363
364 nvWriteMC(dev, 0xc040, savedc040);
365
366 if (mpll) {
367 nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
368 nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
369 }
370}
371
372void
373nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
374 struct nouveau_pll_vals *pv)
375{
376 struct drm_nouveau_private *dev_priv = dev->dev_private;
377 int cv = dev_priv->vbios->chip_version;
378
379 if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
380 cv >= 0x40) {
381 if (reg1 > 0x405c)
382 setPLL_double_highregs(dev, reg1, pv);
383 else
384 setPLL_double_lowregs(dev, reg1, pv);
385 } else
386 setPLL_single(dev, reg1, pv);
387}
388
389/*
390 * PLL getting
391 */
392
393static void
394nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
395 uint32_t pll2, struct nouveau_pll_vals *pllvals)
396{
397 struct drm_nouveau_private *dev_priv = dev->dev_private;
398
399 /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */
400
401 /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
402 pllvals->log2P = (pll1 >> 16) & 0x7;
403 pllvals->N2 = pllvals->M2 = 1;
404
405 if (reg1 <= 0x405c) {
406 pllvals->NM1 = pll2 & 0xffff;
407 /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
408 if (!(pll1 & 0x1100))
409 pllvals->NM2 = pll2 >> 16;
410 } else {
411 pllvals->NM1 = pll1 & 0xffff;
412 if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
413 pllvals->NM2 = pll2 & 0xffff;
414 else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
415 pllvals->M1 &= 0xf; /* only 4 bits */
416 if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
417 pllvals->M2 = (pll1 >> 4) & 0x7;
418 pllvals->N2 = ((pll1 >> 21) & 0x18) |
419 ((pll1 >> 19) & 0x7);
420 }
421 }
422 }
423}
424
425int
426nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
427 struct nouveau_pll_vals *pllvals)
428{
429 struct drm_nouveau_private *dev_priv = dev->dev_private;
430 const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
431 NV_PRAMDAC_MPLL_COEFF,
432 NV_PRAMDAC_VPLL_COEFF,
433 NV_RAMDAC_VPLL2 };
434 const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
435 0x4020,
436 NV_PRAMDAC_VPLL_COEFF,
437 NV_RAMDAC_VPLL2 };
438 uint32_t reg1, pll1, pll2 = 0;
439 struct pll_lims pll_lim;
440 int ret;
441
442 if (dev_priv->card_type < NV_40)
443 reg1 = nv04_regs[plltype];
444 else
445 reg1 = nv40_regs[plltype];
446
447 pll1 = nvReadMC(dev, reg1);
448
449 if (reg1 <= 0x405c)
450 pll2 = nvReadMC(dev, reg1 + 4);
451 else if (nv_two_reg_pll(dev)) {
452 uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);
453
454 pll2 = nvReadMC(dev, reg2);
455 }
456
457 if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
458 uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
459
460 /* check whether vpll has been forced into single stage mode */
461 if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
462 if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
463 pll2 = 0;
464 } else
465 if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
466 pll2 = 0;
467 }
468
469 nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
470
471 ret = get_pll_limits(dev, plltype, &pll_lim);
472 if (ret)
473 return ret;
474
475 pllvals->refclk = pll_lim.refclk;
476
477 return 0;
478}
479
480int
481nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
482{
483 /* Avoid divide by zero if called at an inappropriate time */
484 if (!pv->M1 || !pv->M2)
485 return 0;
486
487 return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
488}
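
/* Editor's note: a worked example, assuming refclk is in kHz as in the
 * nForce path below: N1=200, M1=27, N2=M2=1, log2P=1, refclk=27000 gives
 * 200 * 1 * 27000 / (27 * 1) >> 1 = 100000 kHz, i.e. a 100 MHz clock.
 */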
489
490int
491nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
492{
493 struct nouveau_pll_vals pllvals;
494
495 if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
496 uint32_t mpllP;
497
498 pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
499 if (!mpllP)
500 mpllP = 4;
501
502 return 400000 / mpllP;
503 } else
504 if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
505 uint32_t clock;
506
507 pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
508 return clock;
509 }
510
511 nouveau_hw_get_pllvals(dev, plltype, &pllvals);
512
513 return nouveau_hw_pllvals_to_clk(&pllvals);
514}
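
/* Editor's note: clocks here appear to be in kHz (the nForce path above
 * returns 400000 / mpllP for a 400 MHz base), so a hypothetical return of
 * 100000 from nouveau_hw_get_clock(dev, MPLL) means a 100 MHz memory clock.
 */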
515
516static void
517nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
518{
519	/* The VPLL on an unused head can come up with a random value, way
520	 * beyond the PLL limits. For some reason this causes the chip to lock
521	 * up when reading the DAC palette regs, so set a valid PLL here when
522	 * such a condition is detected. Only seen on NV11 to date.
523	 */
524
525 struct pll_lims pll_lim;
526 struct nouveau_pll_vals pv;
527 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
528
529 if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
530 return;
531 nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
532
533 if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
534 pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
535 pv.log2P <= pll_lim.max_log2p)
536 return;
537
538	NV_WARN(dev, "VPLL %d outside limits, attempting to fix\n", head + 1);
539
540 /* set lowest clock within static limits */
541 pv.M1 = pll_lim.vco1.max_m;
542 pv.N1 = pll_lim.vco1.min_n;
543 pv.log2P = pll_lim.max_usable_log2p;
544 nouveau_hw_setpll(dev, pllreg, &pv);
545}
546
547/*
548 * vga font save/restore
549 */
550
551static void nouveau_vga_font_io(struct drm_device *dev,
552 void __iomem *iovram,
553 bool save, unsigned plane)
554{
555 struct drm_nouveau_private *dev_priv = dev->dev_private;
556 unsigned i;
557
558 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
559 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
560 for (i = 0; i < 16384; i++) {
561 if (save) {
562 dev_priv->saved_vga_font[plane][i] =
563 ioread32_native(iovram + i * 4);
564 } else {
565 iowrite32_native(dev_priv->saved_vga_font[plane][i],
566 iovram + i * 4);
567 }
568 }
569}
570
571void
572nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
573{
574 uint8_t misc, gr4, gr5, gr6, seq2, seq4;
575 bool graphicsmode;
576 unsigned plane;
577 void __iomem *iovram;
578
579 if (nv_two_heads(dev))
580 NVSetOwner(dev, 0);
581
582 NVSetEnablePalette(dev, 0, true);
583 graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
584 NVSetEnablePalette(dev, 0, false);
585
586 if (graphicsmode) /* graphics mode => framebuffer => no need to save */
587 return;
588
589 NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");
590
591 /* map first 64KiB of VRAM, holds VGA fonts etc */
592 iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
593 if (!iovram) {
594 NV_ERROR(dev, "Failed to map VRAM, "
595 "cannot save/restore VGA fonts.\n");
596 return;
597 }
598
599 if (nv_two_heads(dev))
600 NVBlankScreen(dev, 1, true);
601 NVBlankScreen(dev, 0, true);
602
603 /* save control regs */
604 misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
605 seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
606 seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
607 gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
608 gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
609 gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);
610
611 NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
612 NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
613 NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
614 NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);
615
616 /* store font in planes 0..3 */
617 for (plane = 0; plane < 4; plane++)
618 nouveau_vga_font_io(dev, iovram, save, plane);
619
620 /* restore control regs */
621 NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
622 NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
623 NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
624 NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
625 NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
626 NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);
627
628 if (nv_two_heads(dev))
629 NVBlankScreen(dev, 1, false);
630 NVBlankScreen(dev, 0, false);
631
632 iounmap(iovram);
633}
634
635/*
636 * mode state save/load
637 */
638
639static void
640rd_cio_state(struct drm_device *dev, int head,
641 struct nv04_crtc_reg *crtcstate, int index)
642{
643 crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
644}
645
646static void
647wr_cio_state(struct drm_device *dev, int head,
648 struct nv04_crtc_reg *crtcstate, int index)
649{
650 NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
651}
652
653static void
654nv_save_state_ramdac(struct drm_device *dev, int head,
655 struct nv04_mode_state *state)
656{
657 struct drm_nouveau_private *dev_priv = dev->dev_private;
658 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
659 int i;
660
661 if (dev_priv->card_type >= NV_10)
662 regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
663
664 nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
665 state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
666 if (nv_two_heads(dev))
667 state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
668 if (dev_priv->chipset == 0x11)
669 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
670
671 regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
672
673 if (nv_gf4_disp_arch(dev))
674 regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
675 if (dev_priv->chipset >= 0x30)
676 regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
677
678 regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
679 regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
680 regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
681 regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
682 regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
683 regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
684 regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
685 regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);
686
687 for (i = 0; i < 7; i++) {
688 uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
689 regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
690 regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
691 }
692
693 if (nv_gf4_disp_arch(dev)) {
694 regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
695 for (i = 0; i < 3; i++) {
696 regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
697 regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
698 }
699 }
700
701 regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
702 regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
703 if (!nv_gf4_disp_arch(dev) && head == 0) {
704 /* early chips don't allow access to PRAMDAC_TMDS_* without
705 * the head A FPCLK on (nv11 even locks up) */
706 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
707 ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
708 }
709 regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
710 regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);
711
712 regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);
713
714 if (nv_gf4_disp_arch(dev))
715 regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
716
717 if (dev_priv->card_type == NV_40) {
718 regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
719 regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
720 regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
721
722 for (i = 0; i < 38; i++)
723 regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
724 NV_PRAMDAC_CTV + 4*i);
725 }
726}
727
728static void
729nv_load_state_ramdac(struct drm_device *dev, int head,
730 struct nv04_mode_state *state)
731{
732 struct drm_nouveau_private *dev_priv = dev->dev_private;
733 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
734 uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
735 int i;
736
737 if (dev_priv->card_type >= NV_10)
738 NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
739
740 nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
741 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
742 if (nv_two_heads(dev))
743 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
744 if (dev_priv->chipset == 0x11)
745 NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
746
747 NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
748
749 if (nv_gf4_disp_arch(dev))
750 NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
751 if (dev_priv->chipset >= 0x30)
752 NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
753
754 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
755 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
756 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
757 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
758 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
759 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
760 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
761 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);
762
763 for (i = 0; i < 7; i++) {
764 uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
765
766 NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
767 NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
768 }
769
770 if (nv_gf4_disp_arch(dev)) {
771 NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
772 for (i = 0; i < 3; i++) {
773 NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
774 NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
775 }
776 }
777
778 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
779 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
780 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
781 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);
782
783 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);
784
785 if (nv_gf4_disp_arch(dev))
786 NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
787
788 if (dev_priv->card_type == NV_40) {
789 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
790 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
791 NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
792
793 for (i = 0; i < 38; i++)
794 NVWriteRAMDAC(dev, head,
795 NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
796 }
797}
798
799static void
800nv_save_state_vga(struct drm_device *dev, int head,
801 struct nv04_mode_state *state)
802{
803 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
804 int i;
805
806 regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);
807
808 for (i = 0; i < 25; i++)
809 rd_cio_state(dev, head, regp, i);
810
811 NVSetEnablePalette(dev, head, true);
812 for (i = 0; i < 21; i++)
813 regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
814 NVSetEnablePalette(dev, head, false);
815
816 for (i = 0; i < 9; i++)
817 regp->Graphics[i] = NVReadVgaGr(dev, head, i);
818
819 for (i = 0; i < 5; i++)
820 regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
821}
822
823static void
824nv_load_state_vga(struct drm_device *dev, int head,
825 struct nv04_mode_state *state)
826{
827 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
828 int i;
829
830 NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);
831
832 for (i = 0; i < 5; i++)
833 NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);
834
835 nv_lock_vga_crtc_base(dev, head, false);
836 for (i = 0; i < 25; i++)
837 wr_cio_state(dev, head, regp, i);
838 nv_lock_vga_crtc_base(dev, head, true);
839
840 for (i = 0; i < 9; i++)
841 NVWriteVgaGr(dev, head, i, regp->Graphics[i]);
842
843 NVSetEnablePalette(dev, head, true);
844 for (i = 0; i < 21; i++)
845 NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
846 NVSetEnablePalette(dev, head, false);
847}
848
849static void
850nv_save_state_ext(struct drm_device *dev, int head,
851 struct nv04_mode_state *state)
852{
853 struct drm_nouveau_private *dev_priv = dev->dev_private;
854 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
855 int i;
856
857 rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
858 rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
859 rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
860 rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
861 rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
862 rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
863 rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
864
865 rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
866 rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
867 rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
868 if (dev_priv->card_type >= NV_30)
869 rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
870 rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
871 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
872 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
873 rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
874 rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
875
876 if (dev_priv->card_type >= NV_10) {
877 regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
878 regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
879
880 if (dev_priv->card_type >= NV_30)
881 regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
882
883 if (dev_priv->card_type == NV_40)
884 regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
885
886 if (nv_two_heads(dev))
887 regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
888 regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
889 }
890
891 regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);
892
893 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
894 rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
895 if (dev_priv->card_type >= NV_10) {
896 rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
897 rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
898 rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
899 rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
900 }
901 /* NV11 and NV20 don't have this, they stop at 0x52. */
902 if (nv_gf4_disp_arch(dev)) {
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
904 rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
905
906 for (i = 0; i < 0x10; i++)
907 regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
908 rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
909 rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);
910
911 rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
912 rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
913 }
914
915 regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
916}
917
918static void
919nv_load_state_ext(struct drm_device *dev, int head,
920 struct nv04_mode_state *state)
921{
922 struct drm_nouveau_private *dev_priv = dev->dev_private;
923 struct nv04_crtc_reg *regp = &state->crtc_reg[head];
924 uint32_t reg900;
925 int i;
926
927 if (dev_priv->card_type >= NV_10) {
928 if (nv_two_heads(dev))
929 /* setting ENGINE_CTRL (EC) *must* come before
930 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
931 * EC that should not be overwritten by writing stale EC
932 */
933 NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);
934
935 nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
936 nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
937 nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
938 nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
939 nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
940 nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
941 nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
942 nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
943 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);
944
945 NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
946 NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
947 NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
948
949 if (dev_priv->card_type >= NV_30)
950 NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
951
952 if (dev_priv->card_type == NV_40) {
953 NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
954
955 reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
956 if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
957 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
958 else
959 NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
960 }
961 }
962
963 NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);
964
965 wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
966 wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
967 wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
968 wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
969 wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
970 wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
971 wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
972 wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
973 wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
974 if (dev_priv->card_type >= NV_30)
975 wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
976
977 wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
978 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
979 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
980 wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
981 if (dev_priv->card_type == NV_40)
982 nv_fix_nv40_hw_cursor(dev, head);
983 wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
984
985 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
986 wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
987 if (dev_priv->card_type >= NV_10) {
988 wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
989 wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
990 wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
991 wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
992 }
993 /* NV11 and NV20 stop at 0x52. */
994 if (nv_gf4_disp_arch(dev)) {
995 if (dev_priv->card_type == NV_10) {
996 /* Not waiting for vertical retrace before modifying
997 CRE_53/CRE_54 causes lockups. */
998 nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
999 nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1000 }
1001
1002 wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
1003 wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
1004
1005 for (i = 0; i < 0x10; i++)
1006 NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
1008 wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);
1009
1010 wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
1011 wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
1012 }
1013
1014 NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
1015
1016 /* Setting 1 on this value gives you interrupts for every vblank period. */
1017 NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
1018 NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
1019}
1020
1021static void
1022nv_save_state_palette(struct drm_device *dev, int head,
1023 struct nv04_mode_state *state)
1024{
1025 int head_offset = head * NV_PRMDIO_SIZE, i;
1026
1027 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
1028 NV_PRMDIO_PIXEL_MASK_MASK);
1029 nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);
1030
1031 for (i = 0; i < 768; i++) {
1032 state->crtc_reg[head].DAC[i] = nv_rd08(dev,
1033 NV_PRMDIO_PALETTE_DATA + head_offset);
1034 }
1035
1036 NVSetEnablePalette(dev, head, false);
1037}
1038
1039void
1040nouveau_hw_load_state_palette(struct drm_device *dev, int head,
1041 struct nv04_mode_state *state)
1042{
1043 int head_offset = head * NV_PRMDIO_SIZE, i;
1044
1045 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
1046 NV_PRMDIO_PIXEL_MASK_MASK);
1047 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);
1048
1049 for (i = 0; i < 768; i++) {
1050 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
1051 state->crtc_reg[head].DAC[i]);
1052 }
1053
1054 NVSetEnablePalette(dev, head, false);
1055}
1056
1057void nouveau_hw_save_state(struct drm_device *dev, int head,
1058 struct nv04_mode_state *state)
1059{
1060 struct drm_nouveau_private *dev_priv = dev->dev_private;
1061
1062 if (dev_priv->chipset == 0x11)
1063 /* NB: no attempt is made to restore the bad pll later on */
1064 nouveau_hw_fix_bad_vpll(dev, head);
1065 nv_save_state_ramdac(dev, head, state);
1066 nv_save_state_vga(dev, head, state);
1067 nv_save_state_palette(dev, head, state);
1068 nv_save_state_ext(dev, head, state);
1069}
1070
1071void nouveau_hw_load_state(struct drm_device *dev, int head,
1072 struct nv04_mode_state *state)
1073{
1074 NVVgaProtect(dev, head, true);
1075 nv_load_state_ramdac(dev, head, state);
1076 nv_load_state_ext(dev, head, state);
1077 nouveau_hw_load_state_palette(dev, head, state);
1078 nv_load_state_vga(dev, head, state);
1079 NVVgaProtect(dev, head, false);
1080}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.h b/drivers/gpu/drm/nouveau/nouveau_hw.h
new file mode 100644
index 000000000000..869130f83602
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.h
@@ -0,0 +1,455 @@
1/*
2 * Copyright 2008 Stuart Bennett
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_HW_H__
24#define __NOUVEAU_HW_H__
25
26#include "drmP.h"
27#include "nouveau_drv.h"
28
29#define MASK(field) ( \
30 (0xffffffff >> (31 - ((1 ? field) - (0 ? field)))) << (0 ? field))
31
32#define XLATE(src, srclowbit, outfield) ( \
33 (((src) >> (srclowbit)) << (0 ? outfield)) & MASK(outfield))
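
/* Editor's note: these macros rely on "field" expanding to a high:low bit
 * pair, e.g. 7:4, so that (1 ? field) picks the high bit and (0 ? field)
 * the low bit through the conditional operator. A hypothetical example:
 *
 *	#define EXAMPLE_FIELD 7:4
 *	MASK(EXAMPLE_FIELD)          == 0x000000f0
 *	XLATE(0x3, 0, EXAMPLE_FIELD) == 0x00000030
 */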
34
35void NVWriteVgaSeq(struct drm_device *, int head, uint8_t index, uint8_t value);
36uint8_t NVReadVgaSeq(struct drm_device *, int head, uint8_t index);
37void NVWriteVgaGr(struct drm_device *, int head, uint8_t index, uint8_t value);
38uint8_t NVReadVgaGr(struct drm_device *, int head, uint8_t index);
39void NVSetOwner(struct drm_device *, int owner);
40void NVBlankScreen(struct drm_device *, int head, bool blank);
41void nouveau_hw_setpll(struct drm_device *, uint32_t reg1,
42 struct nouveau_pll_vals *pv);
43int nouveau_hw_get_pllvals(struct drm_device *, enum pll_types plltype,
44 struct nouveau_pll_vals *pllvals);
45int nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pllvals);
46int nouveau_hw_get_clock(struct drm_device *, enum pll_types plltype);
47void nouveau_hw_save_vga_fonts(struct drm_device *, bool save);
48void nouveau_hw_save_state(struct drm_device *, int head,
49 struct nv04_mode_state *state);
50void nouveau_hw_load_state(struct drm_device *, int head,
51 struct nv04_mode_state *state);
52void nouveau_hw_load_state_palette(struct drm_device *, int head,
53 struct nv04_mode_state *state);
54
55/* nouveau_calc.c */
56extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
57 int *burst, int *lwm);
58extern int nouveau_calc_pll_mnp(struct drm_device *, struct pll_lims *pll_lim,
59 int clk, struct nouveau_pll_vals *pv);
60
61static inline uint32_t
62nvReadMC(struct drm_device *dev, uint32_t reg)
63{
64 uint32_t val = nv_rd32(dev, reg);
65 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
66 return val;
67}
68
69static inline void
70nvWriteMC(struct drm_device *dev, uint32_t reg, uint32_t val)
71{
72 NV_REG_DEBUG(MC, dev, "reg %08x val %08x\n", reg, val);
73 nv_wr32(dev, reg, val);
74}
75
76static inline uint32_t
77nvReadVIDEO(struct drm_device *dev, uint32_t reg)
78{
79 uint32_t val = nv_rd32(dev, reg);
80 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
81 return val;
82}
83
84static inline void
85nvWriteVIDEO(struct drm_device *dev, uint32_t reg, uint32_t val)
86{
87 NV_REG_DEBUG(VIDEO, dev, "reg %08x val %08x\n", reg, val);
88 nv_wr32(dev, reg, val);
89}
90
91static inline uint32_t
92nvReadFB(struct drm_device *dev, uint32_t reg)
93{
94 uint32_t val = nv_rd32(dev, reg);
95 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
96 return val;
97}
98
99static inline void
100nvWriteFB(struct drm_device *dev, uint32_t reg, uint32_t val)
101{
102 NV_REG_DEBUG(FB, dev, "reg %08x val %08x\n", reg, val);
103 nv_wr32(dev, reg, val);
104}
105
106static inline uint32_t
107nvReadEXTDEV(struct drm_device *dev, uint32_t reg)
108{
109 uint32_t val = nv_rd32(dev, reg);
110 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
111 return val;
112}
113
114static inline void
115nvWriteEXTDEV(struct drm_device *dev, uint32_t reg, uint32_t val)
116{
117 NV_REG_DEBUG(EXTDEV, dev, "reg %08x val %08x\n", reg, val);
118 nv_wr32(dev, reg, val);
119}
120
121static inline uint32_t NVReadCRTC(struct drm_device *dev,
122 int head, uint32_t reg)
123{
124 uint32_t val;
125 if (head)
126 reg += NV_PCRTC0_SIZE;
127 val = nv_rd32(dev, reg);
128 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
129 return val;
130}
131
132static inline void NVWriteCRTC(struct drm_device *dev,
133 int head, uint32_t reg, uint32_t val)
134{
135 if (head)
136 reg += NV_PCRTC0_SIZE;
137 NV_REG_DEBUG(CRTC, dev, "head %d reg %08x val %08x\n", head, reg, val);
138 nv_wr32(dev, reg, val);
139}
140
141static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
142 int head, uint32_t reg)
143{
144 uint32_t val;
145 if (head)
146 reg += NV_PRAMDAC0_SIZE;
147 val = nv_rd32(dev, reg);
148 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
149 head, reg, val);
150 return val;
151}
152
153static inline void NVWriteRAMDAC(struct drm_device *dev,
154 int head, uint32_t reg, uint32_t val)
155{
156 if (head)
157 reg += NV_PRAMDAC0_SIZE;
158 NV_REG_DEBUG(RAMDAC, dev, "head %d reg %08x val %08x\n",
159 head, reg, val);
160 nv_wr32(dev, reg, val);
161}
162
163static inline uint8_t nv_read_tmds(struct drm_device *dev,
164 int or, int dl, uint8_t address)
165{
166 int ramdac = (or & OUTPUT_C) >> 2;
167
168 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8,
169 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | address);
170 return NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8);
171}
172
173static inline void nv_write_tmds(struct drm_device *dev,
174 int or, int dl, uint8_t address,
175 uint8_t data)
176{
177 int ramdac = (or & OUTPUT_C) >> 2;
178
179 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA + dl * 8, data);
180 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL + dl * 8, address);
181}
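
/* Editor's note: a usage sketch of the indirect protocol above; 0x04 is
 * a hypothetical TMDS register address:
 *
 *	uint8_t val = nv_read_tmds(dev, or, 0, 0x04);
 *	nv_write_tmds(dev, or, 0, 0x04, val | 0x01);
 */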
182
183static inline void NVWriteVgaCrtc(struct drm_device *dev,
184 int head, uint8_t index, uint8_t value)
185{
186 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
187 head, index, value);
188 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
189 nv_wr08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
190}
191
192static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
193 int head, uint8_t index)
194{
195 uint8_t val;
196 nv_wr08(dev, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
197 val = nv_rd08(dev, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
198 NV_REG_DEBUG(VGACRTC, dev, "head %d index 0x%02x data 0x%02x\n",
199 head, index, val);
200 return val;
201}
202
203/* CR57 and CR58 are a fun pair of regs. CR57 provides an index (0-0xf)
204 * for CR58. I suspect they in fact do nothing, but are merely a way to
205 * carry useful per-head variables around.
206 *
207 * Known uses:
208 * CR57		CR58
209 * 0x00		index to the appropriate dcb entry (or 7f for inactive)
210 * 0x02		dcb entry's "or" value (or 00 for inactive)
211 * 0x03		bit0 set for dual link (LVDS, possibly elsewhere too)
212 * 0x08 or 0x09	pxclk in MHz
213 * 0x0f		laptop panel info - low nibble for PEXTDEV_BOOT_0 strap
214 * 			high nibble for xlat strap value
215 */
216
217static inline void
218NVWriteVgaCrtc5758(struct drm_device *dev, int head, uint8_t index, uint8_t value)
219{
220 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
221 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_58, value);
222}
223
224static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_t index)
225{
226 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_57, index);
227 return NVReadVgaCrtc(dev, head, NV_CIO_CRE_58);
228}
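
/* Editor's note: e.g. NVReadVgaCrtc5758(dev, head, 0x00) returns the DCB
 * entry index for the head (or 0x7f when inactive), per the table above.
 */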
229
230static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
231 int head, uint32_t reg)
232{
233 struct drm_nouveau_private *dev_priv = dev->dev_private;
234 uint8_t val;
235
236 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
237 * NVSetOwner for the relevant head to be programmed */
238 if (head && dev_priv->card_type == NV_40)
239 reg += NV_PRMVIO_SIZE;
240
241 val = nv_rd08(dev, reg);
242 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n", head, reg, val);
243 return val;
244}
245
246static inline void NVWritePRMVIO(struct drm_device *dev,
247 int head, uint32_t reg, uint8_t value)
248{
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250
251 /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
252 * NVSetOwner for the relevant head to be programmed */
253 if (head && dev_priv->card_type == NV_40)
254 reg += NV_PRMVIO_SIZE;
255
256 NV_REG_DEBUG(RMVIO, dev, "head %d reg %08x val %02x\n",
257 head, reg, value);
258 nv_wr08(dev, reg, value);
259}
260
261static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
262{
263 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
264 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
265}
266
267static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
268{
269 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
270 return !(nv_rd08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
271}
272
273static inline void NVWriteVgaAttr(struct drm_device *dev,
274 int head, uint8_t index, uint8_t value)
275{
276 if (NVGetEnablePalette(dev, head))
277 index &= ~0x20;
278 else
279 index |= 0x20;
280
281 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
282 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
283 head, index, value);
284 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
285 nv_wr08(dev, NV_PRMCIO_AR__WRITE + head * NV_PRMCIO_SIZE, value);
286}
287
288static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
289 int head, uint8_t index)
290{
291 uint8_t val;
292 if (NVGetEnablePalette(dev, head))
293 index &= ~0x20;
294 else
295 index |= 0x20;
296
297 nv_rd08(dev, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
298 nv_wr08(dev, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, index);
299 val = nv_rd08(dev, NV_PRMCIO_AR__READ + head * NV_PRMCIO_SIZE);
300 NV_REG_DEBUG(VGAATTR, dev, "head %d index 0x%02x data 0x%02x\n",
301 head, index, val);
302 return val;
303}
304
305static inline void NVVgaSeqReset(struct drm_device *dev, int head, bool start)
306{
307 NVWriteVgaSeq(dev, head, NV_VIO_SR_RESET_INDEX, start ? 0x1 : 0x3);
308}
309
310static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
311{
312 uint8_t seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);
313
314 if (protect) {
315 NVVgaSeqReset(dev, head, true);
316 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
317 } else {
318 /* Reenable sequencer, then turn on screen */
319 NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20); /* reenable display */
320 NVVgaSeqReset(dev, head, false);
321 }
322 NVSetEnablePalette(dev, head, protect);
323}
324
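NVVgaProtect() above pairs a sequencer reset with screen blanking so timing registers can be reprogrammed safely. A minimal usage sketch (illustrative only, not part of this patch; example_set_timings is a hypothetical caller):

static void example_set_timings(struct drm_device *dev, int head)
{
	NVVgaProtect(dev, head, true);	/* sequencer reset + screen off */
	/* ... program CRTC/sequencer timing registers here ... */
	NVVgaProtect(dev, head, false);	/* sequencer running, screen back on */
}
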
325static inline bool
326nv_heads_tied(struct drm_device *dev)
327{
328 struct drm_nouveau_private *dev_priv = dev->dev_private;
329
330 if (dev_priv->chipset == 0x11)
331 return !!(nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28));
332
333 return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
334}
335
336/* makes cr0-7 on the specified head read-only */
337static inline bool
338nv_lock_vga_crtc_base(struct drm_device *dev, int head, bool lock)
339{
340 uint8_t cr11 = NVReadVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX);
341 bool waslocked = cr11 & 0x80;
342
343 if (lock)
344 cr11 |= 0x80;
345 else
346 cr11 &= ~0x80;
347 NVWriteVgaCrtc(dev, head, NV_CIO_CR_VRE_INDEX, cr11);
348
349 return waslocked;
350}
351
352static inline void
353nv_lock_vga_crtc_shadow(struct drm_device *dev, int head, int lock)
354{
355 /* shadow lock: connects 0x60?3d? regs to "real" 0x3d? regs
356 * bit7: unlocks HDT, HBS, HBE, HRS, HRE, HEB
357 * bit6: seems to have some effect on CR09 (double scan, VBS_9)
358 * bit5: unlocks HDE
359 * bit4: unlocks VDE
360 * bit3: unlocks VDT, OVL, VRS, ?VRE?, VBS, VBE, LSR, EBR
361 * bit2: same as bit 1 of 0x60?804
362 * bit0: same as bit 0 of 0x60?804
363 */
364
365 uint8_t cr21 = lock;
366
367 if (lock < 0)
368 /* 0xfa is generic "unlock all" mask */
369 cr21 = NVReadVgaCrtc(dev, head, NV_CIO_CRE_21) | 0xfa;
370
371 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_21, cr21);
372}
373
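Illustrative calls (not part of this patch) showing how the two ranges of the lock argument are interpreted:

	nv_lock_vga_crtc_shadow(dev, head, -1);   /* negative: OR the 0xfa "unlock all" mask into CR21 */
	nv_lock_vga_crtc_shadow(dev, head, 0x00); /* >= 0: written to CR21 verbatim, relocking everything */
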
374/* renders the extended crtc regs (cr19+) on all crtcs impervious:
375 * immutable and unreadable
376 */
377static inline bool
378NVLockVgaCrtcs(struct drm_device *dev, bool lock)
379{
380 struct drm_nouveau_private *dev_priv = dev->dev_private;
381 bool waslocked = !NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
382
383 NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
384 lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
385 /* NV11 has independently lockable extended crtcs, except when tied */
386 if (dev_priv->chipset == 0x11 && !nv_heads_tied(dev))
387 NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
388 lock ? NV_CIO_SR_LOCK_VALUE :
389 NV_CIO_SR_UNLOCK_RW_VALUE);
390
391 return waslocked;
392}
393
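The boolean return value exists so callers can restore the previous state afterwards; a minimal save/restore sketch (illustrative, not part of this patch):

	bool waslocked = NVLockVgaCrtcs(dev, false);

	/* ... access extended CRTC registers ... */

	NVLockVgaCrtcs(dev, waslocked);
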
394/* nv04 cursor max dimensions of 32x32 (A1R5G5B5) */
395#define NV04_CURSOR_SIZE 32
396/* limit nv10 cursors to 64x64 (ARGB8) (we could go to 64x255) */
397#define NV10_CURSOR_SIZE 64
398
399static inline int nv_cursor_width(struct drm_device *dev)
400{
401 struct drm_nouveau_private *dev_priv = dev->dev_private;
402
403 return dev_priv->card_type >= NV_10 ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
404}
405
406static inline void
407nv_fix_nv40_hw_cursor(struct drm_device *dev, int head)
408{
409 /* on some nv40 boards (such as the "true" nv40, in the NV_PFB_BOOT_0
410 * sense, e.g. the gf6800gt), a hardware bug requires a write to
411 * PRAMDAC_CURSOR_POS for changes to the CRTC CURCTL regs to take
412 * effect, whether changing the pixmap location or just showing/hiding
413 * the cursor. */
414 uint32_t curpos = NVReadRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS);
415 NVWriteRAMDAC(dev, head, NV_PRAMDAC_CU_START_POS, curpos);
416}
417
418static inline void
419nv_show_cursor(struct drm_device *dev, int head, bool show)
420{
421 struct drm_nouveau_private *dev_priv = dev->dev_private;
422 uint8_t *curctl1 =
423 &dev_priv->mode_reg.crtc_reg[head].CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX];
424
425 if (show)
426 *curctl1 |= MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
427 else
428 *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
429 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
430
431 if (dev_priv->card_type == NV_40)
432 nv_fix_nv40_hw_cursor(dev, head);
433}
434
435static inline uint32_t
436nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
437{
438 struct drm_nouveau_private *dev_priv = dev->dev_private;
439 int mask;
440
441 if (bpp == 15)
442 bpp = 16;
443 if (bpp == 24)
444 bpp = 8;
445
446 /* Alignment requirements taken from the Haiku driver */
447 if (dev_priv->card_type == NV_04)
448 mask = 128 / bpp - 1;
449 else
450 mask = 512 / bpp - 1;
451
452 return (width + mask) & ~mask;
453}
454
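For illustration, the alignment arithmetic in nv_pitch_align() works out as follows (hypothetical figures):

/*
 * e.g. width = 1025 at 32bpp on a non-NV04 card: mask = 512/32 - 1 = 15,
 * so the returned pitch is (1025 + 15) & ~15 = 1040 pixels.  15bpp is
 * first promoted to 16, and packed 24bpp modes are aligned using the
 * 8bpp mask.
 */
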
455#endif /* __NOUVEAU_HW_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
new file mode 100644
index 000000000000..70e994d28122
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -0,0 +1,269 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_i2c.h"
28#include "nouveau_hw.h"
29
30static void
31nv04_i2c_setscl(void *data, int state)
32{
33 struct nouveau_i2c_chan *i2c = data;
34 struct drm_device *dev = i2c->dev;
35 uint8_t val;
36
37 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
38 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
39}
40
41static void
42nv04_i2c_setsda(void *data, int state)
43{
44 struct nouveau_i2c_chan *i2c = data;
45 struct drm_device *dev = i2c->dev;
46 uint8_t val;
47
48 val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
49 NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
50}
51
52static int
53nv04_i2c_getscl(void *data)
54{
55 struct nouveau_i2c_chan *i2c = data;
56 struct drm_device *dev = i2c->dev;
57
58 return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
59}
60
61static int
62nv04_i2c_getsda(void *data)
63{
64 struct nouveau_i2c_chan *i2c = data;
65 struct drm_device *dev = i2c->dev;
66
67 return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
68}
69
70static void
71nv4e_i2c_setscl(void *data, int state)
72{
73 struct nouveau_i2c_chan *i2c = data;
74 struct drm_device *dev = i2c->dev;
75 uint8_t val;
76
77 val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
78 nv_wr32(dev, i2c->wr, val | 0x01);
79}
80
81static void
82nv4e_i2c_setsda(void *data, int state)
83{
84 struct nouveau_i2c_chan *i2c = data;
85 struct drm_device *dev = i2c->dev;
86 uint8_t val;
87
88 val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
89 nv_wr32(dev, i2c->wr, val | 0x01);
90}
91
92static int
93nv4e_i2c_getscl(void *data)
94{
95 struct nouveau_i2c_chan *i2c = data;
96 struct drm_device *dev = i2c->dev;
97
98 return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
99}
100
101static int
102nv4e_i2c_getsda(void *data)
103{
104 struct nouveau_i2c_chan *i2c = data;
105 struct drm_device *dev = i2c->dev;
106
107 return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
108}
109
110static int
111nv50_i2c_getscl(void *data)
112{
113 struct nouveau_i2c_chan *i2c = data;
114 struct drm_device *dev = i2c->dev;
115
116 return !!(nv_rd32(dev, i2c->rd) & 1);
117}
118
120static int
121nv50_i2c_getsda(void *data)
122{
123 struct nouveau_i2c_chan *i2c = data;
124 struct drm_device *dev = i2c->dev;
125
126 return !!(nv_rd32(dev, i2c->rd) & 2);
127}
128
129static void
130nv50_i2c_setscl(void *data, int state)
131{
132 struct nouveau_i2c_chan *i2c = data;
133 struct drm_device *dev = i2c->dev;
134
135 nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
136}
137
138static void
139nv50_i2c_setsda(void *data, int state)
140{
141 struct nouveau_i2c_chan *i2c = data;
142 struct drm_device *dev = i2c->dev;
143
144 nv_wr32(dev, i2c->wr,
145 (nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
146 i2c->data = state;
147}
148
149static const uint32_t nv50_i2c_port[] = {
150 0x00e138, 0x00e150, 0x00e168, 0x00e180,
151 0x00e254, 0x00e274, 0x00e764, 0x00e780,
152 0x00e79c, 0x00e7b8
153};
154#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
155
156int
157nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160 struct nouveau_i2c_chan *i2c;
161 int ret;
162
163 if (entry->chan)
164 return -EEXIST;
165
166 if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
167 NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
168 return -EINVAL;
169 }
170
171 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
172 if (i2c == NULL)
173 return -ENOMEM;
174
175 switch (entry->port_type) {
176 case 0:
177 i2c->algo.bit.setsda = nv04_i2c_setsda;
178 i2c->algo.bit.setscl = nv04_i2c_setscl;
179 i2c->algo.bit.getsda = nv04_i2c_getsda;
180 i2c->algo.bit.getscl = nv04_i2c_getscl;
181 i2c->rd = entry->read;
182 i2c->wr = entry->write;
183 break;
184 case 4:
185 i2c->algo.bit.setsda = nv4e_i2c_setsda;
186 i2c->algo.bit.setscl = nv4e_i2c_setscl;
187 i2c->algo.bit.getsda = nv4e_i2c_getsda;
188 i2c->algo.bit.getscl = nv4e_i2c_getscl;
189 i2c->rd = 0x600800 + entry->read;
190 i2c->wr = 0x600800 + entry->write;
191 break;
192 case 5:
193 i2c->algo.bit.setsda = nv50_i2c_setsda;
194 i2c->algo.bit.setscl = nv50_i2c_setscl;
195 i2c->algo.bit.getsda = nv50_i2c_getsda;
196 i2c->algo.bit.getscl = nv50_i2c_getscl;
197 i2c->rd = nv50_i2c_port[entry->read];
198 i2c->wr = i2c->rd;
199 break;
200 case 6:
201 i2c->rd = entry->read;
202 i2c->wr = entry->write;
203 break;
204 default:
205 NV_ERROR(dev, "DCB I2C port type %d unknown\n",
206 entry->port_type);
207 kfree(i2c);
208 return -EINVAL;
209 }
210
211 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
212 "nouveau-%s-%d", pci_name(dev->pdev), index);
213 i2c->adapter.owner = THIS_MODULE;
214 i2c->adapter.dev.parent = &dev->pdev->dev;
215 i2c->dev = dev;
216 i2c_set_adapdata(&i2c->adapter, i2c);
217
218 if (entry->port_type < 6) {
219 i2c->adapter.algo_data = &i2c->algo.bit;
220 i2c->algo.bit.udelay = 40;
221 i2c->algo.bit.timeout = usecs_to_jiffies(5000);
222 i2c->algo.bit.data = i2c;
223 ret = i2c_bit_add_bus(&i2c->adapter);
224 } else {
225 i2c->adapter.algo_data = &i2c->algo.dp;
226 i2c->algo.dp.running = false;
227 i2c->algo.dp.address = 0;
228 i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
229 ret = i2c_dp_aux_add_bus(&i2c->adapter);
230 }
231
232 if (ret) {
233 NV_ERROR(dev, "Failed to register i2c %d\n", index);
234 kfree(i2c);
235 return ret;
236 }
237
238 entry->chan = i2c;
239 return 0;
240}
241
242void
243nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
244{
245 if (!entry->chan)
246 return;
247
248 i2c_del_adapter(&entry->chan->adapter);
249 kfree(entry->chan);
250 entry->chan = NULL;
251}
252
253struct nouveau_i2c_chan *
254nouveau_i2c_find(struct drm_device *dev, int index)
255{
256 struct drm_nouveau_private *dev_priv = dev->dev_private;
257 struct nvbios *bios = &dev_priv->VBIOS;
258
259 if (index >= DCB_MAX_NUM_I2C_ENTRIES)
260 return NULL;
261
262 if (!bios->bdcb.dcb.i2c[index].chan) {
263 if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
264 return NULL;
265 }
266
267 return bios->bdcb.dcb.i2c[index].chan;
268}
269
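Once registered, a channel returned by nouveau_i2c_find() behaves like any other kernel i2c adapter. A hypothetical probe sketch (not part of this patch; example_ddc_probe and the 0x50 slave address are illustrative), assuming the standard i2c_transfer() API:

static bool example_ddc_probe(struct drm_device *dev, int dcb_index)
{
	struct nouveau_i2c_chan *chan = nouveau_i2c_find(dev, dcb_index);
	uint8_t buf;
	struct i2c_msg msg = {
		.addr = 0x50,		/* typical DDC/EDID slave address */
		.flags = I2C_M_RD,
		.len = 1,
		.buf = &buf,
	};

	return chan && i2c_transfer(&chan->adapter, &msg, 1) == 1;
}
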
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
new file mode 100644
index 000000000000..c8eaf7a9fcbb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __NOUVEAU_I2C_H__
24#define __NOUVEAU_I2C_H__
25
26#include <linux/i2c.h>
27#include <linux/i2c-id.h>
28#include <linux/i2c-algo-bit.h>
29#include "drm_dp_helper.h"
30
31struct dcb_i2c_entry;
32
33struct nouveau_i2c_chan {
34 struct i2c_adapter adapter;
35 struct drm_device *dev;
36 union {
37 struct i2c_algo_bit_data bit;
38 struct i2c_algo_dp_aux_data dp;
39 } algo;
40 unsigned rd;
41 unsigned wr;
42 unsigned data;
43};
44
45int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
46void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
47struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
48
49int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
50 uint8_t *read_byte);
51
52#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 000000000000..a2c30f4611ba
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,72 @@
1/**
2 * \file nouveau_ioc32.c
3 *
4 * 32-bit ioctl compatibility routines for the Nouveau DRM.
5 *
6 * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
7 *
8 *
9 * Copyright (C) Paul Mackerras 2005
10 * Copyright (C) Egbert Eich 2003,2004
11 * Copyright (C) Dave Airlie 2005
12 * All Rights Reserved.
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a
15 * copy of this software and associated documentation files (the "Software"),
16 * to deal in the Software without restriction, including without limitation
17 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
18 * and/or sell copies of the Software, and to permit persons to whom the
19 * Software is furnished to do so, subject to the following conditions:
20 *
21 * The above copyright notice and this permission notice (including the next
22 * paragraph) shall be included in all copies or substantial portions of the
23 * Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
28 * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
29 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#include <linux/compat.h>
35
36#include "drmP.h"
37#include "drm.h"
38
39#include "nouveau_drv.h"
40
41/**
42 * Called whenever a 32-bit process running under a 64-bit kernel
43 * performs an ioctl on /dev/dri/card<n>.
44 *
45 * \param filp file pointer.
46 * \param cmd command.
47 * \param arg user argument.
48 * \return zero on success or negative number on failure.
49 */
50long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
51 unsigned long arg)
52{
53 unsigned int nr = DRM_IOCTL_NR(cmd);
54 drm_ioctl_compat_t *fn = NULL;
55 int ret;
56
57 if (nr < DRM_COMMAND_BASE)
58 return drm_compat_ioctl(filp, cmd, arg);
59
60#if 0
61 if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(nouveau_compat_ioctls))
62 fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
63#endif
64 lock_kernel(); /* XXX for now */
65 if (fn != NULL)
66 ret = (*fn)(filp, cmd, arg);
67 else
68 ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
69 unlock_kernel();
70
71 return ret;
72}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 000000000000..370c72c968d1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,702 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drm.h"
36#include "nouveau_drv.h"
37#include "nouveau_reg.h"
38#include <linux/ratelimit.h>
39
40/* needed for hotplug irq */
41#include "nouveau_connector.h"
42#include "nv50_display.h"
43
44void
45nouveau_irq_preinstall(struct drm_device *dev)
46{
47 struct drm_nouveau_private *dev_priv = dev->dev_private;
48
49 /* Master disable */
50 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
51
52 if (dev_priv->card_type == NV_50) {
53 INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
54 INIT_LIST_HEAD(&dev_priv->vbl_waiting);
55 }
56}
57
58int
59nouveau_irq_postinstall(struct drm_device *dev)
60{
61 /* Master enable */
62 nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
63 return 0;
64}
65
66void
67nouveau_irq_uninstall(struct drm_device *dev)
68{
69 /* Master disable */
70 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
71}
72
73static int
74nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
75{
76 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
77 struct nouveau_pgraph_object_method *grm;
78 struct nouveau_pgraph_object_class *grc;
79
80 grc = dev_priv->engine.graph.grclass;
81 while (grc->id) {
82 if (grc->id == class)
83 break;
84 grc++;
85 }
86
87 if (grc->id != class || !grc->methods)
88 return -ENOENT;
89
90 grm = grc->methods;
91 while (grm->id) {
92 if (grm->id == mthd)
93 return grm->exec(chan, class, mthd, data);
94 grm++;
95 }
96
97 return -ENOENT;
98}
99
100static bool
101nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
102{
103 struct drm_device *dev = chan->dev;
104 const int subc = (addr >> 13) & 0x7;
105 const int mthd = addr & 0x1ffc;
106
107 if (mthd == 0x0000) {
108 struct nouveau_gpuobj_ref *ref = NULL;
109
110 if (nouveau_gpuobj_ref_find(chan, data, &ref))
111 return false;
112
113 if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
114 return false;
115
116 chan->sw_subchannel[subc] = ref->gpuobj->class;
117 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
118 NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
119 return true;
120 }
121
122 /* hw object */
123 if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
124 return false;
125
126 if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
127 return false;
128
129 return true;
130}
131
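A worked decode of the CACHE1 address split above (illustrative value):

/*
 * e.g. addr = 0x6040: subchannel = (0x6040 >> 13) & 0x7 = 3,
 * method = 0x6040 & 0x1ffc = 0x0040; method 0x0000 is always the
 * object-bind case handled first.
 */
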
132static void
133nouveau_fifo_irq_handler(struct drm_device *dev)
134{
135 struct drm_nouveau_private *dev_priv = dev->dev_private;
136 struct nouveau_engine *engine = &dev_priv->engine;
137 uint32_t status, reassign;
138 int cnt = 0;
139
140 reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
141 while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
142 struct nouveau_channel *chan = NULL;
143 uint32_t chid, get;
144
145 nv_wr32(dev, NV03_PFIFO_CACHES, 0);
146
147 chid = engine->fifo.channel_id(dev);
148 if (chid < engine->fifo.channels)
149 chan = dev_priv->fifos[chid];
150 get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
151
152 if (status & NV_PFIFO_INTR_CACHE_ERROR) {
153 uint32_t mthd, data;
154 int ptr;
155
156 /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
157 * wrapping on my G80 chips, but CACHE1 isn't big
158 * enough for this much data.  Tests show that it
159 * wraps around to the start at GET=0x800.  No clue
160 * as to why.
161 */
162 ptr = (get & 0x7ff) >> 2;
163
164 if (dev_priv->card_type < NV_40) {
165 mthd = nv_rd32(dev,
166 NV04_PFIFO_CACHE1_METHOD(ptr));
167 data = nv_rd32(dev,
168 NV04_PFIFO_CACHE1_DATA(ptr));
169 } else {
170 mthd = nv_rd32(dev,
171 NV40_PFIFO_CACHE1_METHOD(ptr));
172 data = nv_rd32(dev,
173 NV40_PFIFO_CACHE1_DATA(ptr));
174 }
175
176 if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
177 NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
178 "Mthd 0x%04x Data 0x%08x\n",
179 chid, (mthd >> 13) & 7, mthd & 0x1ffc,
180 data);
181 }
182
183 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
184 nv_wr32(dev, NV03_PFIFO_INTR_0,
185 NV_PFIFO_INTR_CACHE_ERROR);
186
187 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
188 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
189 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
190 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
191 nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
192 nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
193
194 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
195 nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
196 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
197
198 status &= ~NV_PFIFO_INTR_CACHE_ERROR;
199 }
200
201 if (status & NV_PFIFO_INTR_DMA_PUSHER) {
202 NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
203
204 status &= ~NV_PFIFO_INTR_DMA_PUSHER;
205 nv_wr32(dev, NV03_PFIFO_INTR_0,
206 NV_PFIFO_INTR_DMA_PUSHER);
207
208 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
209 if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
210 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
211 get + 4);
212 }
213
214 if (status) {
215 NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
216 status, chid);
217 nv_wr32(dev, NV03_PFIFO_INTR_0, status);
218 status = 0;
219 }
220
221 nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
222 }
223
224 if (status) {
225 NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
226 nv_wr32(dev, 0x2140, 0);
227 nv_wr32(dev, 0x140, 0);
228 }
229
230 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
231}
232
233struct nouveau_bitfield_names {
234 uint32_t mask;
235 const char *name;
236};
237
238static struct nouveau_bitfield_names nstatus_names[] =
239{
240 { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
241 { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
242 { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
243 { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
244};
245
246static struct nouveau_bitfield_names nstatus_names_nv10[] =
247{
248 { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
249 { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
250 { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
251 { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
252};
253
254static struct nouveau_bitfield_names nsource_names[] =
255{
256 { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
257 { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
258 { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
259 { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
260 { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
261 { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
262 { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
263 { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
264 { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
265 { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
266 { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
267 { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
268 { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
269 { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
270 { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
271 { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
272 { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
273 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
274 { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
275};
276
277static void
278nouveau_print_bitfield_names_(uint32_t value,
279 const struct nouveau_bitfield_names *namelist,
280 const int namelist_len)
281{
282 /*
283 * Caller must have already printed the KERN_* log level for us.
284 * Also the caller is responsible for adding the newline.
285 */
286 int i;
287 for (i = 0; i < namelist_len; ++i) {
288 uint32_t mask = namelist[i].mask;
289 if (value & mask) {
290 printk(" %s", namelist[i].name);
291 value &= ~mask;
292 }
293 }
294 if (value)
295 printk(" (unknown bits 0x%08x)", value);
296}
297#define nouveau_print_bitfield_names(val, namelist) \
298 nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
299
300
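For illustration (hypothetical value), a call like the one used by the trap-dump code below appends the decoded names to the caller's current log line:

	nouveau_print_bitfield_names(nsource, nsource_names);
	/* might append: " DATA_ERROR PROTECTION_ERROR (unknown bits 0x00400000)" */
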
301static int
302nouveau_graph_chid_from_grctx(struct drm_device *dev)
303{
304 struct drm_nouveau_private *dev_priv = dev->dev_private;
305 uint32_t inst;
306 int i;
307
308 if (dev_priv->card_type < NV_40)
309 return dev_priv->engine.fifo.channels;
310 else
311 if (dev_priv->card_type < NV_50) {
312 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
313
314 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
315 struct nouveau_channel *chan = dev_priv->fifos[i];
316
317 if (!chan || !chan->ramin_grctx)
318 continue;
319
320 if (inst == chan->ramin_grctx->instance)
321 break;
322 }
323 } else {
324 inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
325
326 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
327 struct nouveau_channel *chan = dev_priv->fifos[i];
328
329 if (!chan || !chan->ramin)
330 continue;
331
332 if (inst == chan->ramin->instance)
333 break;
334 }
335 }
336
337
338 return i;
339}
340
341static int
342nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
343{
344 struct drm_nouveau_private *dev_priv = dev->dev_private;
345 struct nouveau_engine *engine = &dev_priv->engine;
346 int channel;
347
348 if (dev_priv->card_type < NV_10)
349 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
350 else
351 if (dev_priv->card_type < NV_40)
352 channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
353 else
354 channel = nouveau_graph_chid_from_grctx(dev);
355
356 if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
357 NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
358 return -EINVAL;
359 }
360
361 *channel_ret = channel;
362 return 0;
363}
364
365struct nouveau_pgraph_trap {
366 int channel;
367 int class;
368 int subc, mthd, size;
369 uint32_t data, data2;
370 uint32_t nsource, nstatus;
371};
372
373static void
374nouveau_graph_trap_info(struct drm_device *dev,
375 struct nouveau_pgraph_trap *trap)
376{
377 struct drm_nouveau_private *dev_priv = dev->dev_private;
378 uint32_t address;
379
380 trap->nsource = trap->nstatus = 0;
381 if (dev_priv->card_type < NV_50) {
382 trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
383 trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
384 }
385
386 if (nouveau_graph_trapped_channel(dev, &trap->channel))
387 trap->channel = -1;
388 address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
389
390 trap->mthd = address & 0x1FFC;
391 trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
392 if (dev_priv->card_type < NV_10) {
393 trap->subc = (address >> 13) & 0x7;
394 } else {
395 trap->subc = (address >> 16) & 0x7;
396 trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
397 }
398
399 if (dev_priv->card_type < NV_10)
400 trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
401 else if (dev_priv->card_type < NV_40)
402 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
403 else if (dev_priv->card_type < NV_50)
404 trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
405 else
406 trap->class = nv_rd32(dev, 0x400814);
407}
408
409static void
410nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
411 struct nouveau_pgraph_trap *trap)
412{
413 struct drm_nouveau_private *dev_priv = dev->dev_private;
414 uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
415
416 NV_INFO(dev, "%s - nSource:", id);
417 nouveau_print_bitfield_names(nsource, nsource_names);
418 printk(", nStatus:");
419 if (dev_priv->card_type < NV_10)
420 nouveau_print_bitfield_names(nstatus, nstatus_names);
421 else
422 nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
423 printk("\n");
424
425 NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
426 "Data 0x%08x:0x%08x\n",
427 id, trap->channel, trap->subc,
428 trap->class, trap->mthd,
429 trap->data2, trap->data);
430}
431
432static int
433nouveau_pgraph_intr_swmthd(struct drm_device *dev,
434 struct nouveau_pgraph_trap *trap)
435{
436 struct drm_nouveau_private *dev_priv = dev->dev_private;
437
438 if (trap->channel < 0 ||
439 trap->channel >= dev_priv->engine.fifo.channels ||
440 !dev_priv->fifos[trap->channel])
441 return -ENODEV;
442
443 return nouveau_call_method(dev_priv->fifos[trap->channel],
444 trap->class, trap->mthd, trap->data);
445}
446
447static inline void
448nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
449{
450 struct nouveau_pgraph_trap trap;
451 int unhandled = 0;
452
453 nouveau_graph_trap_info(dev, &trap);
454
455 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
456 if (nouveau_pgraph_intr_swmthd(dev, &trap))
457 unhandled = 1;
458 } else {
459 unhandled = 1;
460 }
461
462 if (unhandled)
463 nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
464}
465
466static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);
467
468static int nouveau_ratelimit(void)
469{
470 return __ratelimit(&nouveau_ratelimit_state);
471}
472
474static inline void
475nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
476{
477 struct nouveau_pgraph_trap trap;
478 int unhandled = 0;
479
480 nouveau_graph_trap_info(dev, &trap);
481 trap.nsource = nsource;
482
483 if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
484 if (nouveau_pgraph_intr_swmthd(dev, &trap))
485 unhandled = 1;
486 } else {
487 unhandled = 1;
488 }
489
490 if (unhandled && nouveau_ratelimit())
491 nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
492}
493
494static inline void
495nouveau_pgraph_intr_context_switch(struct drm_device *dev)
496{
497 struct drm_nouveau_private *dev_priv = dev->dev_private;
498 struct nouveau_engine *engine = &dev_priv->engine;
499 uint32_t chid;
500
501 chid = engine->fifo.channel_id(dev);
502 NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
503
504 switch (dev_priv->card_type) {
505 case NV_04:
506 nv04_graph_context_switch(dev);
507 break;
508 case NV_10:
509 nv10_graph_context_switch(dev);
510 break;
511 default:
512 NV_ERROR(dev, "Context switch not implemented\n");
513 break;
514 }
515}
516
517static void
518nouveau_pgraph_irq_handler(struct drm_device *dev)
519{
520 uint32_t status;
521
522 while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
523 uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
524
525 if (status & NV_PGRAPH_INTR_NOTIFY) {
526 nouveau_pgraph_intr_notify(dev, nsource);
527
528 status &= ~NV_PGRAPH_INTR_NOTIFY;
529 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
530 }
531
532 if (status & NV_PGRAPH_INTR_ERROR) {
533 nouveau_pgraph_intr_error(dev, nsource);
534
535 status &= ~NV_PGRAPH_INTR_ERROR;
536 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
537 }
538
539 if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
540 nouveau_pgraph_intr_context_switch(dev);
541
542 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
543 nv_wr32(dev, NV03_PGRAPH_INTR,
544 NV_PGRAPH_INTR_CONTEXT_SWITCH);
545 }
546
547 if (status) {
548 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
549 nv_wr32(dev, NV03_PGRAPH_INTR, status);
550 }
551
552 if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
553 nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
554 }
555
556 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
557}
558
559static void
560nv50_pgraph_irq_handler(struct drm_device *dev)
561{
562 uint32_t status, nsource;
563
564 status = nv_rd32(dev, NV03_PGRAPH_INTR);
565 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
566
567 if (status & 0x00000001) {
568 nouveau_pgraph_intr_notify(dev, nsource);
569 status &= ~0x00000001;
570 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
571 }
572
573 if (status & 0x00000010) {
574 nouveau_pgraph_intr_error(dev, nsource |
575 NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
576
577 status &= ~0x00000010;
578 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
579 }
580
581 if (status & 0x00001000) {
582 nv_wr32(dev, 0x400500, 0x00000000);
583 nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
584 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
585 NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
586 nv_wr32(dev, 0x400500, 0x00010001);
587
588 nv50_graph_context_switch(dev);
589
590 status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
591 }
592
593 if (status & 0x00100000) {
594 nouveau_pgraph_intr_error(dev, nsource |
595 NV03_PGRAPH_NSOURCE_DATA_ERROR);
596
597 status &= ~0x00100000;
598 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
599 }
600
601 if (status & 0x00200000) {
602 int r;
603
604 nouveau_pgraph_intr_error(dev, nsource |
605 NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
606
607 NV_ERROR(dev, "magic set 1:\n");
608 for (r = 0x408900; r <= 0x408910; r += 4)
609 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
610 nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
611 for (r = 0x408e08; r <= 0x408e24; r += 4)
612 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
613 nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);
614
615 NV_ERROR(dev, "magic set 2:\n");
616 for (r = 0x409900; r <= 0x409910; r += 4)
617 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
618 nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
619 for (r = 0x409e08; r <= 0x409e24; r += 4)
620 NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
621 nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);
622
623 status &= ~0x00200000;
624 nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
625 nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
626 }
627
628 if (status) {
629 NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
630 nv_wr32(dev, NV03_PGRAPH_INTR, status);
631 }
632
633 {
634 const int isb = (1 << 16) | (1 << 0);
635
636 if ((nv_rd32(dev, 0x400500) & isb) != isb)
637 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
638 }
639
640 nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
641}
642
643static void
644nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
645{
646 if (crtc & 1)
647 nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
648
649 if (crtc & 2)
650 nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
651}
652
653irqreturn_t
654nouveau_irq_handler(DRM_IRQ_ARGS)
655{
656 struct drm_device *dev = (struct drm_device *)arg;
657 struct drm_nouveau_private *dev_priv = dev->dev_private;
658 uint32_t status, fbdev_flags = 0;
659
660 status = nv_rd32(dev, NV03_PMC_INTR_0);
661 if (!status)
662 return IRQ_NONE;
663
664 if (dev_priv->fbdev_info) {
665 fbdev_flags = dev_priv->fbdev_info->flags;
666 dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
667 }
668
669 if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
670 nouveau_fifo_irq_handler(dev);
671 status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
672 }
673
674 if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
675 if (dev_priv->card_type >= NV_50)
676 nv50_pgraph_irq_handler(dev);
677 else
678 nouveau_pgraph_irq_handler(dev);
679
680 status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
681 }
682
683 if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
684 nouveau_crtc_irq_handler(dev, (status>>24)&3);
685 status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
686 }
687
688 if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
689 NV_PMC_INTR_0_NV50_I2C_PENDING)) {
690 nv50_display_irq_handler(dev);
691 status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
692 NV_PMC_INTR_0_NV50_I2C_PENDING);
693 }
694
695 if (status)
696 NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
697
698 if (dev_priv->fbdev_info)
699 dev_priv->fbdev_info->flags = fbdev_flags;
700
701 return IRQ_HANDLED;
702}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 000000000000..02755712ed3d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,568 @@
1/*
2 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
3 * Copyright 2005 Stephane Marchesin
4 *
5 * The Weather Channel (TM) funded Tungsten Graphics to develop the
6 * initial release of the Radeon 8500 driver under the XFree86 license.
7 * This notice must be preserved.
8 *
9 * Permission is hereby granted, free of charge, to any person obtaining a
10 * copy of this software and associated documentation files (the "Software"),
11 * to deal in the Software without restriction, including without limitation
12 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13 * and/or sell copies of the Software, and to permit persons to whom the
14 * Software is furnished to do so, subject to the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the next
17 * paragraph) shall be included in all copies or substantial portions of the
18 * Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26 * DEALINGS IN THE SOFTWARE.
27 *
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33#include "drmP.h"
34#include "drm.h"
35#include "drm_sarea.h"
36#include "nouveau_drv.h"
37
38static struct mem_block *
39split_block(struct mem_block *p, uint64_t start, uint64_t size,
40 struct drm_file *file_priv)
41{
42 /* Maybe cut off the start of an existing block */
43 if (start > p->start) {
44 struct mem_block *newblock =
45 kmalloc(sizeof(*newblock), GFP_KERNEL);
46 if (!newblock)
47 goto out;
48 newblock->start = start;
49 newblock->size = p->size - (start - p->start);
50 newblock->file_priv = NULL;
51 newblock->next = p->next;
52 newblock->prev = p;
53 p->next->prev = newblock;
54 p->next = newblock;
55 p->size -= newblock->size;
56 p = newblock;
57 }
58
59 /* Maybe cut off the end of an existing block */
60 if (size < p->size) {
61 struct mem_block *newblock =
62 kmalloc(sizeof(*newblock), GFP_KERNEL);
63 if (!newblock)
64 goto out;
65 newblock->start = start + size;
66 newblock->size = p->size - size;
67 newblock->file_priv = NULL;
68 newblock->next = p->next;
69 newblock->prev = p;
70 p->next->prev = newblock;
71 p->next = newblock;
72 p->size = size;
73 }
74
75out:
76 /* Our block is in the middle */
77 p->file_priv = file_priv;
78 return p;
79}
80
81struct mem_block *
82nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
83 int align2, struct drm_file *file_priv, int tail)
84{
85 struct mem_block *p;
86 uint64_t mask = (1 << align2) - 1;
87
88 if (!heap)
89 return NULL;
90
91 if (tail) {
92 list_for_each_prev(p, heap) {
93 uint64_t start = ((p->start + p->size) - size) & ~mask;
94
95 if (p->file_priv == NULL && start >= p->start &&
96 start + size <= p->start + p->size)
97 return split_block(p, start, size, file_priv);
98 }
99 } else {
100 list_for_each(p, heap) {
101 uint64_t start = (p->start + mask) & ~mask;
102
103 if (p->file_priv == NULL &&
104 start + size <= p->start + p->size)
105 return split_block(p, start, size, file_priv);
106 }
107 }
108
109 return NULL;
110}
111
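A hypothetical allocation sketch (not part of this patch): carve a 4 KiB block, 4 KiB aligned (align2 = 12, so mask = 0xfff), from the front of a heap:

	struct mem_block *blk =
		nouveau_mem_alloc_block(heap, 0x1000, 12, file_priv, 0);

	if (!blk)
		return -ENOMEM;
	/* blk->start is 4 KiB aligned; release with nouveau_mem_free_block(blk) */
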
112void nouveau_mem_free_block(struct mem_block *p)
113{
114 p->file_priv = NULL;
115
116 /* Assumes a single contiguous range. Needs a special file_priv in
117 * 'heap' to stop it being subsumed.
118 */
119 if (p->next->file_priv == NULL) {
120 struct mem_block *q = p->next;
121 p->size += q->size;
122 p->next = q->next;
123 p->next->prev = p;
124 kfree(q);
125 }
126
127 if (p->prev->file_priv == NULL) {
128 struct mem_block *q = p->prev;
129 q->size += p->size;
130 q->next = p->next;
131 q->next->prev = q;
132 kfree(p);
133 }
134}
135
136/* Initialize. How to check for an uninitialized heap?
137 */
138int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
139 uint64_t size)
140{
141 struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
142
143 if (!blocks)
144 return -ENOMEM;
145
146 *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
147 if (!*heap) {
148 kfree(blocks);
149 return -ENOMEM;
150 }
151
152 blocks->start = start;
153 blocks->size = size;
154 blocks->file_priv = NULL;
155 blocks->next = blocks->prev = *heap;
156
157 memset(*heap, 0, sizeof(**heap));
158 (*heap)->file_priv = (struct drm_file *) -1;
159 (*heap)->next = (*heap)->prev = blocks;
160 return 0;
161}
162
163/*
164 * Free all blocks associated with the releasing file_priv
165 */
166void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
167{
168 struct mem_block *p;
169
170 if (!heap || !heap->next)
171 return;
172
173 list_for_each(p, heap) {
174 if (p->file_priv == file_priv)
175 p->file_priv = NULL;
176 }
177
178 /* Assumes a single contiguous range. Needs a special file_priv in
179 * 'heap' to stop it being subsumed.
180 */
181 list_for_each(p, heap) {
182 while ((p->file_priv == NULL) &&
183 (p->next->file_priv == NULL) &&
184 (p->next != heap)) {
185 struct mem_block *q = p->next;
186 p->size += q->size;
187 p->next = q->next;
188 p->next->prev = p;
189 kfree(q);
190 }
191 }
192}
193
194/*
195 * NV50 VM helpers
196 */
197int
198nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
199 uint32_t flags, uint64_t phys)
200{
201 struct drm_nouveau_private *dev_priv = dev->dev_private;
202 struct nouveau_gpuobj **pgt;
203 unsigned psz, pfl, pages;
204
205 if (virt >= dev_priv->vm_gart_base &&
206 (virt + size) < (dev_priv->vm_gart_base + dev_priv->vm_gart_size)) {
207 psz = 12;
208 pgt = &dev_priv->gart_info.sg_ctxdma;
209 pfl = 0x21;
210 virt -= dev_priv->vm_gart_base;
211 } else
212 if (virt >= dev_priv->vm_vram_base &&
213 (virt + size) < (dev_priv->vm_vram_base + dev_priv->vm_vram_size)) {
214 psz = 16;
215 pgt = dev_priv->vm_vram_pt;
216 pfl = 0x01;
217 virt -= dev_priv->vm_vram_base;
218 } else {
219 NV_ERROR(dev, "Invalid address: 0x%016llx-0x%016llx\n",
220 virt, virt + size - 1);
221 return -EINVAL;
222 }
223
224 pages = size >> psz;
225
226 dev_priv->engine.instmem.prepare_access(dev, true);
227 if (flags & 0x80000000) {
228 while (pages--) {
229 struct nouveau_gpuobj *pt = pgt[virt >> 29];
230 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
231
232 nv_wo32(dev, pt, pte++, 0x00000000);
233 nv_wo32(dev, pt, pte++, 0x00000000);
234
235 virt += (1 << psz);
236 }
237 } else {
238 while (pages--) {
239 struct nouveau_gpuobj *pt = pgt[virt >> 29];
240 unsigned pte = ((virt & 0x1fffffffULL) >> psz) << 1;
241 unsigned offset_h = upper_32_bits(phys) & 0xff;
242 unsigned offset_l = lower_32_bits(phys);
243
244 nv_wo32(dev, pt, pte++, offset_l | pfl);
245 nv_wo32(dev, pt, pte++, offset_h | flags);
246
247 phys += (1 << psz);
248 virt += (1 << psz);
249 }
250 }
251 dev_priv->engine.instmem.finish_access(dev);
252
253 nv_wr32(dev, 0x100c80, 0x00050001);
254 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
255 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
256 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
257 return -EBUSY;
258 }
259
260 nv_wr32(dev, 0x100c80, 0x00000001);
261 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
262 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
263 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
264 return -EBUSY;
265 }
266
267 return 0;
268}
269
270void
271nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
272{
273 nv50_mem_vm_bind_linear(dev, virt, size, 0x80000000, 0);
274}
275
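A worked example of the PTE indexing in nv50_mem_vm_bind_linear() (illustrative numbers):

/*
 * For a VRAM mapping (psz = 16, i.e. 64 KiB pages), a virtual offset of
 * 0x20000 within the 512 MiB window selected by pgt[virt >> 29] gives
 * pte = ((0x20000 & 0x1fffffff) >> 16) << 1 = 4; two 32-bit words are
 * written per page: the low word carries the physical address plus the
 * present/target bits (pfl), the high word the caller's flags.
 */
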
276/*
277 * Cleanup everything
278 */
279void nouveau_mem_takedown(struct mem_block **heap)
280{
281 struct mem_block *p;
282
283 if (!*heap)
284 return;
285
286 for (p = (*heap)->next; p != *heap;) {
287 struct mem_block *q = p;
288 p = p->next;
289 kfree(q);
290 }
291
292 kfree(*heap);
293 *heap = NULL;
294}
295
296void nouveau_mem_close(struct drm_device *dev)
297{
298 struct drm_nouveau_private *dev_priv = dev->dev_private;
299
300 if (dev_priv->ttm.bdev.man[TTM_PL_PRIV0].has_type)
301 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_PRIV0);
302 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
303
304 ttm_bo_device_release(&dev_priv->ttm.bdev);
305
306 nouveau_ttm_global_release(dev_priv);
307
308 if (drm_core_has_AGP(dev) && dev->agp &&
309 drm_core_check_feature(dev, DRIVER_MODESET)) {
310 struct drm_agp_mem *entry, *tempe;
311
312 /* Remove AGP resources, but leave dev->agp
313 intact until drv_cleanup is called. */
314 list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
315 if (entry->bound)
316 drm_unbind_agp(entry->memory);
317 drm_free_agp(entry->memory, entry->pages);
318 kfree(entry);
319 }
320 INIT_LIST_HEAD(&dev->agp->memory);
321
322 if (dev->agp->acquired)
323 drm_agp_release(dev);
324
325 dev->agp->acquired = 0;
326 dev->agp->enabled = 0;
327 }
328
329 if (dev_priv->fb_mtrr) {
330 drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
331 drm_get_resource_len(dev, 1), DRM_MTRR_WC);
332 dev_priv->fb_mtrr = 0;
333 }
334}
335
336/* XXX won't work on BSD because of pci_read_config_dword */
337static uint32_t
338nouveau_mem_fb_amount_igp(struct drm_device *dev)
339{
340 struct drm_nouveau_private *dev_priv = dev->dev_private;
341 struct pci_dev *bridge;
342 uint32_t mem;
343
344 bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
345 if (!bridge) {
346 NV_ERROR(dev, "no bridge device\n");
347 return 0;
348 }
349
350 if (dev_priv->flags & NV_NFORCE) {
351 pci_read_config_dword(bridge, 0x7C, &mem);
352 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
353 } else
354 if (dev_priv->flags & NV_NFORCE2) {
355 pci_read_config_dword(bridge, 0x84, &mem);
356 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
357 }
358
359 NV_ERROR(dev, "impossible!\n");
360 return 0;
361}
362
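A worked decode of the nForce case above (illustrative register value):

/*
 * e.g. bridge config dword 0x7C reading 0x000007c0:
 * ((0x7c0 >> 6) & 31) + 1 = 0x1f + 1 = 32, i.e. 32 MiB of system RAM
 * stolen for use as framebuffer.
 */
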
363/* returns the amount of FB ram in bytes */
364uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
365{
366 struct drm_nouveau_private *dev_priv = dev->dev_private;
367 uint32_t boot0;
368
369 switch (dev_priv->card_type) {
370 case NV_04:
371 boot0 = nv_rd32(dev, NV03_BOOT_0);
372 if (boot0 & 0x00000100)
373 return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
374
375 switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
376 case NV04_BOOT_0_RAM_AMOUNT_32MB:
377 return 32 * 1024 * 1024;
378 case NV04_BOOT_0_RAM_AMOUNT_16MB:
379 return 16 * 1024 * 1024;
380 case NV04_BOOT_0_RAM_AMOUNT_8MB:
381 return 8 * 1024 * 1024;
382 case NV04_BOOT_0_RAM_AMOUNT_4MB:
383 return 4 * 1024 * 1024;
384 }
385 break;
386 case NV_10:
387 case NV_20:
388 case NV_30:
389 case NV_40:
390 case NV_50:
391 default:
392 if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
393 return nouveau_mem_fb_amount_igp(dev);
394 } else {
395 uint64_t mem;
396 mem = (nv_rd32(dev, NV04_FIFO_DATA) &
397 NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
398 NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
399 return mem * 1024 * 1024;
400 }
401 break;
402 }
403
404 NV_ERROR(dev,
405 "Unable to detect video ram size. Please report your setup to "
406 DRIVER_EMAIL "\n");
407 return 0;
408}
409
410static void nouveau_mem_reset_agp(struct drm_device *dev)
411{
412 uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
413
414 saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
415 saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
416
417 /* clear busmaster bit */
418 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
419 /* clear SBA and AGP bits */
420 nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
421
422 /* power cycle pgraph, if enabled */
423 pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
424 if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
425 nv_wr32(dev, NV03_PMC_ENABLE,
426 pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
427 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
428 NV_PMC_ENABLE_PGRAPH);
429 }
430
431 /* and restore (gives effect of resetting AGP) */
432 nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
433 nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
434}
435
436int
437nouveau_mem_init_agp(struct drm_device *dev)
438{
439 struct drm_nouveau_private *dev_priv = dev->dev_private;
440 struct drm_agp_info info;
441 struct drm_agp_mode mode;
442 int ret;
443
444 if (nouveau_noagp)
445 return 0;
446
447 nouveau_mem_reset_agp(dev);
448
449 if (!dev->agp->acquired) {
450 ret = drm_agp_acquire(dev);
451 if (ret) {
452 NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
453 return ret;
454 }
455 }
456
457 ret = drm_agp_info(dev, &info);
458 if (ret) {
459 NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
460 return ret;
461 }
462
463 /* see agp.h for the AGPSTAT_* modes available */
464 mode.mode = info.mode;
465 ret = drm_agp_enable(dev, mode);
466 if (ret) {
467 NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
468 return ret;
469 }
470
471 dev_priv->gart_info.type = NOUVEAU_GART_AGP;
472 dev_priv->gart_info.aper_base = info.aperture_base;
473 dev_priv->gart_info.aper_size = info.aperture_size;
474 return 0;
475}
476
477int
478nouveau_mem_init(struct drm_device *dev)
479{
480 struct drm_nouveau_private *dev_priv = dev->dev_private;
481 struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
482 int ret, dma_bits = 32;
483
484 dev_priv->fb_phys = drm_get_resource_start(dev, 1);
485 dev_priv->gart_info.type = NOUVEAU_GART_NONE;
486
487 if (dev_priv->card_type >= NV_50 &&
488 pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
489 dma_bits = 40;
490
491 ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
492 if (ret) {
493 NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
494 return ret;
495 }
496
497 ret = nouveau_ttm_global_init(dev_priv);
498 if (ret)
499 return ret;
500
501 ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
502 dev_priv->ttm.bo_global_ref.ref.object,
503 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
504 dma_bits <= 32);
505 if (ret) {
506 NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
507 return ret;
508 }
509
510 INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
511 spin_lock_init(&dev_priv->ttm.bo_list_lock);
512
513 dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
514
515 dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
516 if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
517 dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
518 dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
519
520 NV_INFO(dev, "%d MiB VRAM\n", (int)(dev_priv->fb_available_size >> 20));
521
522 /* remove reserved space at end of vram from available amount */
523 dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
524 dev_priv->fb_aper_free = dev_priv->fb_available_size;
525
526 /* mappable vram */
527 ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
528 dev_priv->fb_available_size >> PAGE_SHIFT);
529 if (ret) {
530 NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
531 return ret;
532 }
533
534 /* GART */
535#if !defined(__powerpc__) && !defined(__ia64__)
536 if (drm_device_is_agp(dev) && dev->agp) {
537 ret = nouveau_mem_init_agp(dev);
538 if (ret)
539 NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
540 }
541#endif
542
543 if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
544 ret = nouveau_sgdma_init(dev);
545 if (ret) {
546 NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
547 return ret;
548 }
549 }
550
551 NV_INFO(dev, "%d MiB GART (aperture)\n",
552 (int)(dev_priv->gart_info.aper_size >> 20));
553 dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;
554
555 ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
556 dev_priv->gart_info.aper_size >> PAGE_SHIFT);
557 if (ret) {
558 NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
559 return ret;
560 }
561
562 dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
563 drm_get_resource_len(dev, 1),
564 DRM_MTRR_WC);
565 return 0;
566}
567
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 000000000000..6c66a34b6345
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,196 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31
32int
33nouveau_notifier_init_channel(struct nouveau_channel *chan)
34{
35 struct drm_device *dev = chan->dev;
36 struct nouveau_bo *ntfy = NULL;
37 int ret;
38
39 ret = nouveau_gem_new(dev, NULL, PAGE_SIZE, 0, nouveau_vram_notify ?
40 TTM_PL_FLAG_VRAM : TTM_PL_FLAG_TT,
41 0, 0x0000, false, true, &ntfy);
42 if (ret)
43 return ret;
44
45 ret = nouveau_bo_pin(ntfy, TTM_PL_FLAG_VRAM);
46 if (ret)
47 goto out_err;
48
49 ret = nouveau_bo_map(ntfy);
50 if (ret)
51 goto out_err;
52
53 ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
54 if (ret)
55 goto out_err;
56
57 chan->notifier_bo = ntfy;
58out_err:
59 if (ret) {
60 mutex_lock(&dev->struct_mutex);
61 drm_gem_object_unreference(ntfy->gem);
62 mutex_unlock(&dev->struct_mutex);
63 }
64
65 return ret;
66}
67
68void
69nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
70{
71 struct drm_device *dev = chan->dev;
72
73 if (!chan->notifier_bo)
74 return;
75
76 nouveau_bo_unmap(chan->notifier_bo);
77 mutex_lock(&dev->struct_mutex);
78 nouveau_bo_unpin(chan->notifier_bo);
79 drm_gem_object_unreference(chan->notifier_bo->gem);
80 mutex_unlock(&dev->struct_mutex);
81 nouveau_mem_takedown(&chan->notifier_heap);
82}
83
84static void
85nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
86 struct nouveau_gpuobj *gpuobj)
87{
88 NV_DEBUG(dev, "\n");
89
90 if (gpuobj->priv)
91 nouveau_mem_free_block(gpuobj->priv);
92}
93
94int
95nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
96 int size, uint32_t *b_offset)
97{
98 struct drm_device *dev = chan->dev;
99 struct drm_nouveau_private *dev_priv = dev->dev_private;
100 struct nouveau_gpuobj *nobj = NULL;
101 struct mem_block *mem;
102 uint32_t offset;
103 int target, ret;
104
105 if (!chan->notifier_heap) {
106 NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
107 chan->id);
108 return -EINVAL;
109 }
110
111 mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
112 (struct drm_file *)-2, 0);
113 if (!mem) {
114 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
115 return -ENOMEM;
116 }
117
118 offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
119 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
120 target = NV_DMA_TARGET_VIDMEM;
121 } else
122 if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
123 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
124 dev_priv->card_type < NV_50) {
125 ret = nouveau_sgdma_get_page(dev, offset, &offset);
126 if (ret)
127 return ret;
128 target = NV_DMA_TARGET_PCI;
129 } else {
130 target = NV_DMA_TARGET_AGP;
131 }
132 } else {
133 NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
134 chan->notifier_bo->bo.mem.mem_type);
135 return -EINVAL;
136 }
137 offset += mem->start;
138
139 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
140 mem->size, NV_DMA_ACCESS_RW, target,
141 &nobj);
142 if (ret) {
143 nouveau_mem_free_block(mem);
144 NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
145 return ret;
146 }
147 nobj->dtor = nouveau_notifier_gpuobj_dtor;
148 nobj->priv = mem;
149
150 ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
151 if (ret) {
152 nouveau_gpuobj_del(dev, &nobj);
153 nouveau_mem_free_block(mem);
154 NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
155 return ret;
156 }
157
158 *b_offset = mem->start;
159 return 0;
160}
161
162int
163nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
164{
165 if (!nobj || nobj->dtor != nouveau_notifier_gpuobj_dtor)
166 return -EINVAL;
167
168 if (poffset) {
169 struct mem_block *mem = nobj->priv;
170
171 if (*poffset >= mem->size)
172			return -EINVAL;	/* out-of-range offset is an error */
173
174 *poffset += mem->start;
175 }
176
177 return 0;
178}
179
180int
181nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
182 struct drm_file *file_priv)
183{
184 struct drm_nouveau_notifierobj_alloc *na = data;
185 struct nouveau_channel *chan;
186 int ret;
187
188 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
189 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
190
191 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
192 if (ret)
193 return ret;
194
195 return 0;
196}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 000000000000..93379bb81bea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1294 @@
1/*
2 * Copyright (C) 2006 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28/*
29 * Authors:
30 * Ben Skeggs <darktama@iinet.net.au>
31 */
32
33#include "drmP.h"
34#include "drm.h"
35#include "nouveau_drv.h"
36#include "nouveau_drm.h"
37
38/* NVidia uses context objects to drive drawing operations.
39
40 Context objects can be selected into 8 subchannels in the FIFO,
41 and then used via DMA command buffers.
42
43   A context object is referenced by a user-defined handle (CARD32). The HW
44 looks up graphics objects in a hash table in the instance RAM.
45
46 An entry in the hash table consists of 2 CARD32. The first CARD32 contains
47   the handle, the second one a bitfield that contains the address of the
48 object in instance RAM.
49
50 The format of the second CARD32 seems to be:
51
52 NV4 to NV30:
53
54 15: 0 instance_addr >> 4
55 17:16 engine (here uses 1 = graphics)
56 28:24 channel id (here uses 0)
57 31 valid (use 1)
58
59 NV40:
60
61 15: 0 instance_addr >> 4 (maybe 19-0)
62 21:20 engine (here uses 1 = graphics)
63 I'm unsure about the other bits, but using 0 seems to work.
64
65 The key into the hash table depends on the object handle and channel id and
66 is given as:
67*/
68static uint32_t
69nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
70{
71 struct drm_nouveau_private *dev_priv = dev->dev_private;
72 uint32_t hash = 0;
73 int i;
74
75 NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);
76
77 for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
78 hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
79 handle >>= dev_priv->ramht_bits;
80 }
81
82 if (dev_priv->card_type < NV_50)
83 hash ^= channel << (dev_priv->ramht_bits - 4);
84 hash <<= 3;
85
86 NV_DEBUG(dev, "hash=0x%08x\n", hash);
87 return hash;
88}
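/* Editor's sketch (not part of the patch): a self-contained userspace
 * rendition of the folding above, to make the handle/channel mixing easy
 * to play with. ramht_bits = 9 is an assumption (a 4KiB RAMHT holds 512
 * eight-byte entries); the kernel reads the real value from dev_priv.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ramht_hash(uint32_t handle, int channel, int ramht_bits)
{
	uint32_t hash = 0;
	int i;

	/* XOR-fold the 32-bit handle into ramht_bits-sized chunks */
	for (i = 32; i > 0; i -= ramht_bits) {
		hash ^= handle & ((1 << ramht_bits) - 1);
		handle >>= ramht_bits;
	}
	/* pre-NV50 also mixes in the channel id */
	hash ^= channel << (ramht_bits - 4);
	/* each entry is two CARD32s, so scale to a byte offset */
	return hash << 3;
}

int main(void)
{
	printf("0x%08x\n", ramht_hash(0xbeef0201, 1, 9));
	return 0;
}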
89
90static int
91nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
92 uint32_t offset)
93{
94 struct drm_nouveau_private *dev_priv = dev->dev_private;
95 uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);
96
97 if (dev_priv->card_type < NV_40)
98 return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
99 return (ctx != 0);
100}
101
102static int
103nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
104{
105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
107 struct nouveau_channel *chan = ref->channel;
108 struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
109 uint32_t ctx, co, ho;
110
111 if (!ramht) {
112 NV_ERROR(dev, "No hash table!\n");
113 return -EINVAL;
114 }
115
116 if (dev_priv->card_type < NV_40) {
117 ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
118 (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
119 (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
120 } else
121 if (dev_priv->card_type < NV_50) {
122 ctx = (ref->instance >> 4) |
123 (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
124 (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
125 } else {
126 if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
127 ctx = (ref->instance << 10) | 2;
128 } else {
129 ctx = (ref->instance >> 4) |
130 ((ref->gpuobj->engine <<
131 NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
132 }
133 }
134
135 instmem->prepare_access(dev, true);
136 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
137 do {
138 if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
139 NV_DEBUG(dev,
140 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
141 chan->id, co, ref->handle, ctx);
142 nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
143 nv_wo32(dev, ramht, (co + 4)/4, ctx);
144
145 list_add_tail(&ref->list, &chan->ramht_refs);
146 instmem->finish_access(dev);
147 return 0;
148 }
149 NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
150 chan->id, co, nv_ro32(dev, ramht, co/4));
151
152 co += 8;
153 if (co >= dev_priv->ramht_size)
154 co = 0;
155 } while (co != ho);
156 instmem->finish_access(dev);
157
158 NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
159 return -ENOMEM;
160}
161
162static void
163nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
164{
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
167 struct nouveau_channel *chan = ref->channel;
168 struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
169 uint32_t co, ho;
170
171 if (!ramht) {
172 NV_ERROR(dev, "No hash table!\n");
173 return;
174 }
175
176 instmem->prepare_access(dev, true);
177 co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
178 do {
179 if (nouveau_ramht_entry_valid(dev, ramht, co) &&
180 (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
181 NV_DEBUG(dev,
182 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
183 chan->id, co, ref->handle,
184 nv_ro32(dev, ramht, (co + 4)));
185 nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
186 nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
187
188 list_del(&ref->list);
189 instmem->finish_access(dev);
190 return;
191 }
192
193 co += 8;
194 if (co >= dev_priv->ramht_size)
195 co = 0;
196 } while (co != ho);
197 list_del(&ref->list);
198 instmem->finish_access(dev);
199
200 NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
201 chan->id, ref->handle);
202}
203
204int
205nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
206 uint32_t size, int align, uint32_t flags,
207 struct nouveau_gpuobj **gpuobj_ret)
208{
209 struct drm_nouveau_private *dev_priv = dev->dev_private;
210 struct nouveau_engine *engine = &dev_priv->engine;
211 struct nouveau_gpuobj *gpuobj;
212 struct mem_block *pramin = NULL;
213 int ret;
214
215 NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
216 chan ? chan->id : -1, size, align, flags);
217
218 if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
219 return -EINVAL;
220
221 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
222 if (!gpuobj)
223 return -ENOMEM;
224 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
225 gpuobj->flags = flags;
226 gpuobj->im_channel = chan;
227
228 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
229
230	/* Choose between the global instmem heap and the per-channel private
231	 * instmem heap. On <NV50, allow requests for private instmem to be
232	 * satisfied from the global heap if no per-channel area is
233	 * available.
234 */
235 if (chan) {
236 if (chan->ramin_heap) {
237 NV_DEBUG(dev, "private heap\n");
238 pramin = chan->ramin_heap;
239 } else
240 if (dev_priv->card_type < NV_50) {
241 NV_DEBUG(dev, "global heap fallback\n");
242 pramin = dev_priv->ramin_heap;
243 }
244 } else {
245 NV_DEBUG(dev, "global heap\n");
246 pramin = dev_priv->ramin_heap;
247 }
248
249	if (!pramin) {
250		NV_ERROR(dev, "No PRAMIN heap!\n");
		nouveau_gpuobj_del(dev, &gpuobj);	/* don't leak the listed gpuobj */
251		return -EINVAL;
252	}
253
254 if (!chan) {
255 ret = engine->instmem.populate(dev, gpuobj, &size);
256 if (ret) {
257 nouveau_gpuobj_del(dev, &gpuobj);
258 return ret;
259 }
260 }
261
262 /* Allocate a chunk of the PRAMIN aperture */
263 gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
264 drm_order(align),
265 (struct drm_file *)-2, 0);
266 if (!gpuobj->im_pramin) {
267 nouveau_gpuobj_del(dev, &gpuobj);
268 return -ENOMEM;
269 }
270
271 if (!chan) {
272 ret = engine->instmem.bind(dev, gpuobj);
273 if (ret) {
274 nouveau_gpuobj_del(dev, &gpuobj);
275 return ret;
276 }
277 }
278
279 if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
280 int i;
281
282 engine->instmem.prepare_access(dev, true);
283 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
284 nv_wo32(dev, gpuobj, i/4, 0);
285 engine->instmem.finish_access(dev);
286 }
287
288 *gpuobj_ret = gpuobj;
289 return 0;
290}
291
292int
293nouveau_gpuobj_early_init(struct drm_device *dev)
294{
295 struct drm_nouveau_private *dev_priv = dev->dev_private;
296
297 NV_DEBUG(dev, "\n");
298
299 INIT_LIST_HEAD(&dev_priv->gpuobj_list);
300
301 return 0;
302}
303
304int
305nouveau_gpuobj_init(struct drm_device *dev)
306{
307 struct drm_nouveau_private *dev_priv = dev->dev_private;
308 int ret;
309
310 NV_DEBUG(dev, "\n");
311
312 if (dev_priv->card_type < NV_50) {
313 ret = nouveau_gpuobj_new_fake(dev,
314 dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
315 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
316 &dev_priv->ramht, NULL);
317 if (ret)
318 return ret;
319 }
320
321 return 0;
322}
323
324void
325nouveau_gpuobj_takedown(struct drm_device *dev)
326{
327 struct drm_nouveau_private *dev_priv = dev->dev_private;
328
329 NV_DEBUG(dev, "\n");
330
331 nouveau_gpuobj_del(dev, &dev_priv->ramht);
332}
333
334void
335nouveau_gpuobj_late_takedown(struct drm_device *dev)
336{
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_gpuobj *gpuobj = NULL;
339 struct list_head *entry, *tmp;
340
341 NV_DEBUG(dev, "\n");
342
343 list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
344 gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
345
346 NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
347 gpuobj, gpuobj->refcount);
348 gpuobj->refcount = 0;
349 nouveau_gpuobj_del(dev, &gpuobj);
350 }
351}
352
353int
354nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
355{
356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 struct nouveau_engine *engine = &dev_priv->engine;
358 struct nouveau_gpuobj *gpuobj;
359 int i;
360
361 NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
362
363 if (!dev_priv || !pgpuobj || !(*pgpuobj))
364 return -EINVAL;
365 gpuobj = *pgpuobj;
366
367 if (gpuobj->refcount != 0) {
368 NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
369 return -EINVAL;
370 }
371
372 if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
373 engine->instmem.prepare_access(dev, true);
374 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
375 nv_wo32(dev, gpuobj, i/4, 0);
376 engine->instmem.finish_access(dev);
377 }
378
379 if (gpuobj->dtor)
380 gpuobj->dtor(dev, gpuobj);
381
382 if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
383 engine->instmem.clear(dev, gpuobj);
384
385 if (gpuobj->im_pramin) {
386 if (gpuobj->flags & NVOBJ_FLAG_FAKE)
387 kfree(gpuobj->im_pramin);
388 else
389 nouveau_mem_free_block(gpuobj->im_pramin);
390 }
391
392 list_del(&gpuobj->list);
393
394 *pgpuobj = NULL;
395 kfree(gpuobj);
396 return 0;
397}
398
399static int
400nouveau_gpuobj_instance_get(struct drm_device *dev,
401 struct nouveau_channel *chan,
402 struct nouveau_gpuobj *gpuobj, uint32_t *inst)
403{
404 struct drm_nouveau_private *dev_priv = dev->dev_private;
405 struct nouveau_gpuobj *cpramin;
406
407 /* <NV50 use PRAMIN address everywhere */
408 if (dev_priv->card_type < NV_50) {
409 *inst = gpuobj->im_pramin->start;
410 return 0;
411 }
412
413 if (chan && gpuobj->im_channel != chan) {
414 NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
415 gpuobj->im_channel->id, chan->id);
416 return -EINVAL;
417 }
418
419 /* NV50 channel-local instance */
420 if (chan) {
421 cpramin = chan->ramin->gpuobj;
422 *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
423 return 0;
424 }
425
426 /* NV50 global (VRAM) instance */
427 if (!gpuobj->im_channel) {
428 /* ...from global heap */
429 if (!gpuobj->im_backing) {
430 NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
431 return -EINVAL;
432 }
433 *inst = gpuobj->im_backing_start;
434 return 0;
435 } else {
436 /* ...from local heap */
437 cpramin = gpuobj->im_channel->ramin->gpuobj;
438 *inst = cpramin->im_backing_start +
439 (gpuobj->im_pramin->start - cpramin->im_pramin->start);
440 return 0;
441 }
442
443 return -EINVAL;
444}
445
446int
447nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
448 uint32_t handle, struct nouveau_gpuobj *gpuobj,
449 struct nouveau_gpuobj_ref **ref_ret)
450{
451 struct drm_nouveau_private *dev_priv = dev->dev_private;
452 struct nouveau_gpuobj_ref *ref;
453 uint32_t instance;
454 int ret;
455
456 NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
457 chan ? chan->id : -1, handle, gpuobj);
458
459 if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
460 return -EINVAL;
461
462 if (!chan && !ref_ret)
463 return -EINVAL;
464
465 if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
466 /* sw object */
467 instance = 0x40;
468 } else {
469 ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
470 if (ret)
471 return ret;
472 }
473
474 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
475 if (!ref)
476 return -ENOMEM;
477 INIT_LIST_HEAD(&ref->list);
478 ref->gpuobj = gpuobj;
479 ref->channel = chan;
480 ref->instance = instance;
481
482 if (!ref_ret) {
483 ref->handle = handle;
484
485 ret = nouveau_ramht_insert(dev, ref);
486 if (ret) {
487 kfree(ref);
488 return ret;
489 }
490 } else {
491 ref->handle = ~0;
492 *ref_ret = ref;
493 }
494
495 ref->gpuobj->refcount++;
496 return 0;
497}
498
499int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
500{
501 struct nouveau_gpuobj_ref *ref;
502
503 NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);
504
505 if (!dev || !pref || *pref == NULL)
506 return -EINVAL;
507 ref = *pref;
508
509 if (ref->handle != ~0)
510 nouveau_ramht_remove(dev, ref);
511
512 if (ref->gpuobj) {
513 ref->gpuobj->refcount--;
514
515 if (ref->gpuobj->refcount == 0) {
516 if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
517 nouveau_gpuobj_del(dev, &ref->gpuobj);
518 }
519 }
520
521 *pref = NULL;
522 kfree(ref);
523 return 0;
524}
525
526int
527nouveau_gpuobj_new_ref(struct drm_device *dev,
528 struct nouveau_channel *oc, struct nouveau_channel *rc,
529 uint32_t handle, uint32_t size, int align,
530 uint32_t flags, struct nouveau_gpuobj_ref **ref)
531{
532 struct nouveau_gpuobj *gpuobj = NULL;
533 int ret;
534
535 ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
536 if (ret)
537 return ret;
538
539 ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
540 if (ret) {
541 nouveau_gpuobj_del(dev, &gpuobj);
542 return ret;
543 }
544
545 return 0;
546}
547
548int
549nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
550 struct nouveau_gpuobj_ref **ref_ret)
551{
552 struct nouveau_gpuobj_ref *ref;
553 struct list_head *entry, *tmp;
554
555 list_for_each_safe(entry, tmp, &chan->ramht_refs) {
556 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
557
558 if (ref->handle == handle) {
559 if (ref_ret)
560 *ref_ret = ref;
561 return 0;
562 }
563 }
564
565 return -EINVAL;
566}
567
568int
569nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
570 uint32_t b_offset, uint32_t size,
571 uint32_t flags, struct nouveau_gpuobj **pgpuobj,
572 struct nouveau_gpuobj_ref **pref)
573{
574 struct drm_nouveau_private *dev_priv = dev->dev_private;
575 struct nouveau_gpuobj *gpuobj = NULL;
576 int i;
577
578 NV_DEBUG(dev,
579 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
580 p_offset, b_offset, size, flags);
581
582 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
583 if (!gpuobj)
584 return -ENOMEM;
585 NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
586 gpuobj->im_channel = NULL;
587 gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
588
589 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
590
591 if (p_offset != ~0) {
592 gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
593 GFP_KERNEL);
594 if (!gpuobj->im_pramin) {
595 nouveau_gpuobj_del(dev, &gpuobj);
596 return -ENOMEM;
597 }
598 gpuobj->im_pramin->start = p_offset;
599 gpuobj->im_pramin->size = size;
600 }
601
602 if (b_offset != ~0) {
603 gpuobj->im_backing = (struct nouveau_bo *)-1;
604 gpuobj->im_backing_start = b_offset;
605 }
606
607	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC)) {
608 dev_priv->engine.instmem.prepare_access(dev, true);
609 for (i = 0; i < gpuobj->im_pramin->size; i += 4)
610 nv_wo32(dev, gpuobj, i/4, 0);
611 dev_priv->engine.instmem.finish_access(dev);
612 }
613
614 if (pref) {
615 i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
616 if (i) {
617 nouveau_gpuobj_del(dev, &gpuobj);
618 return i;
619 }
620 }
621
622 if (pgpuobj)
623 *pgpuobj = gpuobj;
624 return 0;
625}
626
627
628static uint32_t
629nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
630{
631 struct drm_nouveau_private *dev_priv = dev->dev_private;
632
633 /*XXX: dodgy hack for now */
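	/* Editor's note: these sizes line up with the layouts documented
	 * further down -- 24 bytes covers the six words written for NV50
	 * objects, 32 bytes the NV40 context layout, 16 bytes the
	 * NV04-NV30 one. */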
634 if (dev_priv->card_type >= NV_50)
635 return 24;
636 if (dev_priv->card_type >= NV_40)
637 return 32;
638 return 16;
639}
640
641/*
642 DMA objects are used to reference a piece of memory in the
643  framebuffer, PCI or AGP address space. Each object is 16 bytes long
644  and looks as follows:
645
646 entry[0]
647 11:0 class (seems like I can always use 0 here)
648 12 page table present?
649 13 page entry linear?
650 15:14 access: 0 rw, 1 ro, 2 wo
651 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
652 31:20 dma adjust (bits 0-11 of the address)
653 entry[1]
654 dma limit (size of transfer)
655 entry[X]
656 1 0 readonly, 1 readwrite
657 31:12 dma frame address of the page (bits 12-31 of the address)
658 entry[N]
659	page table terminator; nvidia uses the same value as the first pte,
660	rivatv uses 0xffffffff
661
662  Non-linear page tables need a list of frame addresses afterwards;
663  the rivatv project has some info on this.
664
665 The method below creates a DMA object in instance RAM and returns a handle
666 to it that can be used to set up context objects.
667*/
668int
669nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
670 uint64_t offset, uint64_t size, int access,
671 int target, struct nouveau_gpuobj **gpuobj)
672{
673 struct drm_device *dev = chan->dev;
674 struct drm_nouveau_private *dev_priv = dev->dev_private;
675 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
676 int ret;
677
678 NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
679 chan->id, class, offset, size);
680 NV_DEBUG(dev, "access=%d target=%d\n", access, target);
681
682 switch (target) {
683 case NV_DMA_TARGET_AGP:
684 offset += dev_priv->gart_info.aper_base;
685 break;
686 default:
687 break;
688 }
689
690 ret = nouveau_gpuobj_new(dev, chan,
691 nouveau_gpuobj_class_instmem_size(dev, class),
692 16, NVOBJ_FLAG_ZERO_ALLOC |
693 NVOBJ_FLAG_ZERO_FREE, gpuobj);
694 if (ret) {
695 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
696 return ret;
697 }
698
699 instmem->prepare_access(dev, true);
700
701 if (dev_priv->card_type < NV_50) {
702 uint32_t frame, adjust, pte_flags = 0;
703
704 if (access != NV_DMA_ACCESS_RO)
705 pte_flags |= (1<<1);
706 adjust = offset & 0x00000fff;
707 frame = offset & ~0x00000fff;
708
709 nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
710 (adjust << 20) |
711 (access << 14) |
712 (target << 16) |
713 class));
714 nv_wo32(dev, *gpuobj, 1, size - 1);
715 nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
716 nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
717 } else {
718 uint64_t limit = offset + size - 1;
719 uint32_t flags0, flags5;
720
721 if (target == NV_DMA_TARGET_VIDMEM) {
722 flags0 = 0x00190000;
723 flags5 = 0x00010000;
724 } else {
725 flags0 = 0x7fc00000;
726 flags5 = 0x00080000;
727 }
728
729 nv_wo32(dev, *gpuobj, 0, flags0 | class);
730 nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
731 nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
732 nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
733 (upper_32_bits(offset) & 0xff));
734 nv_wo32(dev, *gpuobj, 5, flags5);
735 }
736
737 instmem->finish_access(dev);
738
739 (*gpuobj)->engine = NVOBJ_ENGINE_SW;
740 (*gpuobj)->class = class;
741 return 0;
742}
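/* Editor's sketch (not part of the patch): packing the four CARD32s of a
 * pre-NV50 DMA object exactly as the layout comment and the <NV_50 branch
 * above describe. The sample values are assumptions: a 4KiB read/write
 * VRAM object starting one byte into a page, so adjust = 1.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t class  = 0x003d;	/* NV_CLASS_DMA_IN_MEMORY */
	uint32_t access = 0;		/* NV_DMA_ACCESS_RW */
	uint32_t target = 0;		/* NV_DMA_TARGET_VIDMEM */
	uint64_t offset = 0x1001, size = 0x1000;
	uint32_t adjust = offset & 0x00000fff;
	uint32_t frame  = offset & ~0x00000fffULL;
	uint32_t pte_flags = (access != 1 /* RO */) ? (1 << 1) : 0;
	uint32_t e[4];

	e[0] = (1 << 12) | (1 << 13) | (adjust << 20) |
	       (access << 14) | (target << 16) | class;
	e[1] = size - 1;		/* dma limit */
	e[2] = frame | pte_flags;	/* pte for the page */
	e[3] = frame | pte_flags;	/* terminator repeats the first pte */

	printf("%08x %08x %08x %08x\n", e[0], e[1], e[2], e[3]);
	return 0;
}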
743
744int
745nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
746 uint64_t offset, uint64_t size, int access,
747 struct nouveau_gpuobj **gpuobj,
748 uint32_t *o_ret)
749{
750 struct drm_device *dev = chan->dev;
751 struct drm_nouveau_private *dev_priv = dev->dev_private;
752 int ret;
753
754 if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
755 (dev_priv->card_type >= NV_50 &&
756 dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
757 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
758 offset + dev_priv->vm_gart_base,
759 size, access, NV_DMA_TARGET_AGP,
760 gpuobj);
761 if (o_ret)
762 *o_ret = 0;
763 } else
764 if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
765 *gpuobj = dev_priv->gart_info.sg_ctxdma;
766 if (offset & ~0xffffffffULL) {
767 NV_ERROR(dev, "obj offset exceeds 32-bits\n");
768 return -EINVAL;
769 }
770 if (o_ret)
771 *o_ret = (uint32_t)offset;
772 ret = (*gpuobj != NULL) ? 0 : -EINVAL;
773 } else {
774 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
775 return -EINVAL;
776 }
777
778 return ret;
779}
780
781/* Context objects in the instance RAM have the following structure.
782 * On NV40 they are 32 bytes long; on NV30 and earlier, 16 bytes.
783
784 NV4 - NV30:
785
786 entry[0]
787 11:0 class
788 12 chroma key enable
789 13 user clip enable
790 14 swizzle enable
791 17:15 patch config:
792 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
793 18 synchronize enable
794 19 endian: 1 big, 0 little
795 21:20 dither mode
796 23 single step enable
797 24 patch status: 0 invalid, 1 valid
798 25 context_surface 0: 1 valid
799 26 context surface 1: 1 valid
800 27 context pattern: 1 valid
801 28 context rop: 1 valid
802 29,30 context beta, beta4
803 entry[1]
804 7:0 mono format
805 15:8 color format
806 31:16 notify instance address
807 entry[2]
808 15:0 dma 0 instance address
809 31:16 dma 1 instance address
810 entry[3]
811 dma method traps
812
813 NV40:
814   No idea what the exact format is. Here's what can be deduced:
815
816 entry[0]:
817 11:0 class (maybe uses more bits here?)
818 17 user clip enable
819 21:19 patch config
820 25 patch status valid ?
821 entry[1]:
822 15:0 DMA notifier (maybe 20:0)
823 entry[2]:
824 15:0 DMA 0 instance (maybe 20:0)
825 24 big endian
826 entry[3]:
827 15:0 DMA 1 instance (maybe 20:0)
828 entry[4]:
829 entry[5]:
830 set to 0?
831*/
832int
833nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
834 struct nouveau_gpuobj **gpuobj)
835{
836 struct drm_device *dev = chan->dev;
837 struct drm_nouveau_private *dev_priv = dev->dev_private;
838 int ret;
839
840 NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);
841
842 ret = nouveau_gpuobj_new(dev, chan,
843 nouveau_gpuobj_class_instmem_size(dev, class),
844 16,
845 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
846 gpuobj);
847 if (ret) {
848 NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
849 return ret;
850 }
851
852 dev_priv->engine.instmem.prepare_access(dev, true);
853 if (dev_priv->card_type >= NV_50) {
854 nv_wo32(dev, *gpuobj, 0, class);
855 nv_wo32(dev, *gpuobj, 5, 0x00010000);
856 } else {
857 switch (class) {
858 case NV_CLASS_NULL:
859 nv_wo32(dev, *gpuobj, 0, 0x00001030);
860 nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
861 break;
862 default:
863 if (dev_priv->card_type >= NV_40) {
864 nv_wo32(dev, *gpuobj, 0, class);
865#ifdef __BIG_ENDIAN
866 nv_wo32(dev, *gpuobj, 2, 0x01000000);
867#endif
868 } else {
869#ifdef __BIG_ENDIAN
870 nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
871#else
872 nv_wo32(dev, *gpuobj, 0, class);
873#endif
874 }
875 }
876 }
877 dev_priv->engine.instmem.finish_access(dev);
878
879 (*gpuobj)->engine = NVOBJ_ENGINE_GR;
880 (*gpuobj)->class = class;
881 return 0;
882}
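/* Editor's sketch (not part of the patch): building entry[0] of a pre-NV40
 * graphics context object from the bitfield documented above. The field
 * values are assumptions chosen only to exercise the layout.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t class        = 0x005f;	/* bits 11:0, object class */
	uint32_t patch_config = 2;	/* bits 17:15, e.g. blend_and */
	uint32_t big_endian   = 0;	/* bit 19, endianness */
	uint32_t patch_valid  = 1;	/* bit 24, patch status valid */

	uint32_t entry0 = class |
			  (patch_config << 15) |
			  (big_endian << 19) |
			  (patch_valid << 24);

	printf("entry[0] = 0x%08x\n", entry0);
	return 0;
}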
883
884static int
885nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
886 struct nouveau_gpuobj **gpuobj_ret)
887{
888 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
889 struct nouveau_gpuobj *gpuobj;
890
891 if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
892 return -EINVAL;
893
894 gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
895 if (!gpuobj)
896 return -ENOMEM;
897 gpuobj->engine = NVOBJ_ENGINE_SW;
898 gpuobj->class = class;
899
900 list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
901 *gpuobj_ret = gpuobj;
902 return 0;
903}
904
905static int
906nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
907{
908 struct drm_device *dev = chan->dev;
909 struct drm_nouveau_private *dev_priv = dev->dev_private;
910 struct nouveau_gpuobj *pramin = NULL;
911 uint32_t size;
912 uint32_t base;
913 int ret;
914
915 NV_DEBUG(dev, "ch%d\n", chan->id);
916
917 /* Base amount for object storage (4KiB enough?) */
918 size = 0x1000;
919 base = 0;
920
921 /* PGRAPH context */
922
923 if (dev_priv->card_type == NV_50) {
924 /* Various fixed table thingos */
925 size += 0x1400; /* mostly unknown stuff */
926 size += 0x4000; /* vm pd */
927 base = 0x6000;
928 /* RAMHT, not sure about setting size yet, 32KiB to be safe */
929 size += 0x8000;
930 /* RAMFC */
931 size += 0x1000;
932 /* PGRAPH context */
933 size += 0x70000;
934 }
935
936 NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
937 chan->id, size, base);
938 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
939 &chan->ramin);
940 if (ret) {
941 NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
942 return ret;
943 }
944 pramin = chan->ramin->gpuobj;
945
946 ret = nouveau_mem_init_heap(&chan->ramin_heap,
947 pramin->im_pramin->start + base, size);
948 if (ret) {
949 NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
950 nouveau_gpuobj_ref_del(dev, &chan->ramin);
951 return ret;
952 }
953
954 return 0;
955}
956
957int
958nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
959 uint32_t vram_h, uint32_t tt_h)
960{
961 struct drm_device *dev = chan->dev;
962 struct drm_nouveau_private *dev_priv = dev->dev_private;
963 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
964 struct nouveau_gpuobj *vram = NULL, *tt = NULL;
965 int ret, i;
966
967 INIT_LIST_HEAD(&chan->ramht_refs);
968
969 NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
970
971 /* Reserve a block of PRAMIN for the channel
972 *XXX: maybe on <NV50 too at some point
973 */
974 if (0 || dev_priv->card_type == NV_50) {
975 ret = nouveau_gpuobj_channel_init_pramin(chan);
976 if (ret) {
977 NV_ERROR(dev, "init pramin\n");
978 return ret;
979 }
980 }
981
982 /* NV50 VM
983 * - Allocate per-channel page-directory
984 * - Map GART and VRAM into the channel's address space at the
985 * locations determined during init.
986 */
987 if (dev_priv->card_type >= NV_50) {
988 uint32_t vm_offset, pde;
989
990 instmem->prepare_access(dev, true);
991
992 vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
993 vm_offset += chan->ramin->gpuobj->im_pramin->start;
994
995 ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
996 0, &chan->vm_pd, NULL);
997 if (ret) {
998 instmem->finish_access(dev);
999 return ret;
1000 }
1001 for (i = 0; i < 0x4000; i += 8) {
1002 nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
1003 nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
1004 }
1005
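		/* Editor's note: each NV50 page-directory entry covers 512MiB
		 * of the channel's address space and is two 32-bit words
		 * wide, hence index = (base / 512MiB) * 2 below. */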
1006 pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
1007 ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
1008 dev_priv->gart_info.sg_ctxdma,
1009 &chan->vm_gart_pt);
1010 if (ret) {
1011 instmem->finish_access(dev);
1012 return ret;
1013 }
1014 nv_wo32(dev, chan->vm_pd, pde++,
1015 chan->vm_gart_pt->instance | 0x03);
1016 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
1017
1018 pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
1019 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
1020 ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
1021 dev_priv->vm_vram_pt[i],
1022 &chan->vm_vram_pt[i]);
1023 if (ret) {
1024 instmem->finish_access(dev);
1025 return ret;
1026 }
1027
1028 nv_wo32(dev, chan->vm_pd, pde++,
1029 chan->vm_vram_pt[i]->instance | 0x61);
1030 nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
1031 }
1032
1033 instmem->finish_access(dev);
1034 }
1035
1036 /* RAMHT */
1037 if (dev_priv->card_type < NV_50) {
1038 ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
1039 &chan->ramht);
1040 if (ret)
1041 return ret;
1042 } else {
1043 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
1044 0x8000, 16,
1045 NVOBJ_FLAG_ZERO_ALLOC,
1046 &chan->ramht);
1047 if (ret)
1048 return ret;
1049 }
1050
1051 /* VRAM ctxdma */
1052 if (dev_priv->card_type >= NV_50) {
1053 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
1054 0, dev_priv->vm_end,
1055 NV_DMA_ACCESS_RW,
1056 NV_DMA_TARGET_AGP, &vram);
1057 if (ret) {
1058 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
1059 return ret;
1060 }
1061 } else {
1062 ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
1063 0, dev_priv->fb_available_size,
1064 NV_DMA_ACCESS_RW,
1065 NV_DMA_TARGET_VIDMEM, &vram);
1066 if (ret) {
1067 NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
1068 return ret;
1069 }
1070 }
1071
1072 ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
1073 if (ret) {
1074 NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
1075 return ret;
1076 }
1077
1078 /* TT memory ctxdma */
1079 if (dev_priv->card_type >= NV_50) {
1080 tt = vram;
1081 } else
1082 if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
1083 ret = nouveau_gpuobj_gart_dma_new(chan, 0,
1084 dev_priv->gart_info.aper_size,
1085 NV_DMA_ACCESS_RW, &tt, NULL);
1086 } else {
1087 NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
1088 ret = -EINVAL;
1089 }
1090
1091 if (ret) {
1092 NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
1093 return ret;
1094 }
1095
1096 ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
1097 if (ret) {
1098 NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
1099 return ret;
1100 }
1101
1102 return 0;
1103}
1104
1105void
1106nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
1107{
1108 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
1109 struct drm_device *dev = chan->dev;
1110 struct list_head *entry, *tmp;
1111 struct nouveau_gpuobj_ref *ref;
1112 int i;
1113
1114 NV_DEBUG(dev, "ch%d\n", chan->id);
1115
1116 if (!chan->ramht_refs.next)
1117 return;
1118
1119 list_for_each_safe(entry, tmp, &chan->ramht_refs) {
1120 ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
1121
1122 nouveau_gpuobj_ref_del(dev, &ref);
1123 }
1124
1125 nouveau_gpuobj_ref_del(dev, &chan->ramht);
1126
1127 nouveau_gpuobj_del(dev, &chan->vm_pd);
1128 nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
1129 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
1130 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
1131
1132 if (chan->ramin_heap)
1133 nouveau_mem_takedown(&chan->ramin_heap);
1134 if (chan->ramin)
1135 nouveau_gpuobj_ref_del(dev, &chan->ramin);
1136
1137}
1138
1139int
1140nouveau_gpuobj_suspend(struct drm_device *dev)
1141{
1142 struct drm_nouveau_private *dev_priv = dev->dev_private;
1143 struct nouveau_gpuobj *gpuobj;
1144 int i;
1145
1146 if (dev_priv->card_type < NV_50) {
1147 dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
1148 if (!dev_priv->susres.ramin_copy)
1149 return -ENOMEM;
1150
1151 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
1152 dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
1153 return 0;
1154 }
1155
1156 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1157 if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
1158 continue;
1159
1160 gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
1161 if (!gpuobj->im_backing_suspend) {
1162 nouveau_gpuobj_resume(dev);
1163 return -ENOMEM;
1164 }
1165
1166 dev_priv->engine.instmem.prepare_access(dev, false);
1167 for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
1168 gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
1169 dev_priv->engine.instmem.finish_access(dev);
1170 }
1171
1172 return 0;
1173}
1174
1175void
1176nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
1177{
1178 struct drm_nouveau_private *dev_priv = dev->dev_private;
1179 struct nouveau_gpuobj *gpuobj;
1180
1181 if (dev_priv->card_type < NV_50) {
1182 vfree(dev_priv->susres.ramin_copy);
1183 dev_priv->susres.ramin_copy = NULL;
1184 return;
1185 }
1186
1187 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1188 if (!gpuobj->im_backing_suspend)
1189 continue;
1190
1191 vfree(gpuobj->im_backing_suspend);
1192 gpuobj->im_backing_suspend = NULL;
1193 }
1194}
1195
1196void
1197nouveau_gpuobj_resume(struct drm_device *dev)
1198{
1199 struct drm_nouveau_private *dev_priv = dev->dev_private;
1200 struct nouveau_gpuobj *gpuobj;
1201 int i;
1202
1203 if (dev_priv->card_type < NV_50) {
1204 for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
1205 nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
1206 nouveau_gpuobj_suspend_cleanup(dev);
1207 return;
1208 }
1209
1210 list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
1211 if (!gpuobj->im_backing_suspend)
1212 continue;
1213
1214 dev_priv->engine.instmem.prepare_access(dev, true);
1215 for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
1216 nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
1217 dev_priv->engine.instmem.finish_access(dev);
1218 }
1219
1220 nouveau_gpuobj_suspend_cleanup(dev);
1221}
1222
1223int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
1224 struct drm_file *file_priv)
1225{
1226 struct drm_nouveau_private *dev_priv = dev->dev_private;
1227 struct drm_nouveau_grobj_alloc *init = data;
1228 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
1229 struct nouveau_pgraph_object_class *grc;
1230 struct nouveau_gpuobj *gr = NULL;
1231 struct nouveau_channel *chan;
1232 int ret;
1233
1234 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
1235 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
1236
1237 if (init->handle == ~0)
1238 return -EINVAL;
1239
1240 grc = pgraph->grclass;
1241 while (grc->id) {
1242 if (grc->id == init->class)
1243 break;
1244 grc++;
1245 }
1246
1247 if (!grc->id) {
1248 NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
1249 return -EPERM;
1250 }
1251
1252 if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
1253 return -EEXIST;
1254
1255 if (!grc->software)
1256 ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
1257 else
1258 ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);
1259
1260 if (ret) {
1261 NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
1262 ret, init->channel, init->handle);
1263 return ret;
1264 }
1265
1266 ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
1267 if (ret) {
1268 NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
1269 ret, init->channel, init->handle);
1270 nouveau_gpuobj_del(dev, &gr);
1271 return ret;
1272 }
1273
1274 return 0;
1275}
1276
1277int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
1278 struct drm_file *file_priv)
1279{
1280 struct drm_nouveau_gpuobj_free *objfree = data;
1281 struct nouveau_gpuobj_ref *ref;
1282 struct nouveau_channel *chan;
1283 int ret;
1284
1285 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
1286 NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
1287
1288 ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
1289 if (ret)
1290 return ret;
1291 nouveau_gpuobj_ref_del(dev, &ref);
1292
1293 return 0;
1294}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 000000000000..fa1b0e7165b9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,836 @@
1
2
3#define NV03_BOOT_0 0x00100000
4# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
5# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
6# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
7# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
8# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
9# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
10# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
11# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
12# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
13
14#define NV04_FIFO_DATA 0x0010020c
15# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
16# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
17
18#define NV_RAMIN 0x00700000
19
20#define NV_RAMHT_HANDLE_OFFSET 0
21#define NV_RAMHT_CONTEXT_OFFSET 4
22# define NV_RAMHT_CONTEXT_VALID (1<<31)
23# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
24# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
25# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
26# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
27# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
28# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
29# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
30# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
31
32/* DMA object defines */
33#define NV_DMA_ACCESS_RW 0
34#define NV_DMA_ACCESS_RO 1
35#define NV_DMA_ACCESS_WO 2
36#define NV_DMA_TARGET_VIDMEM 0
37#define NV_DMA_TARGET_PCI 2
38#define NV_DMA_TARGET_AGP 3
39/* The following is not a real value used by the card; it's converted by
40 * nouveau_object_dma_create */
41#define NV_DMA_TARGET_PCI_NONLINEAR 8
42
43/* Some object classes we care about in the drm */
44#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
45#define NV_CLASS_DMA_TO_MEMORY 0x00000003
46#define NV_CLASS_NULL 0x00000030
47#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
48
49#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
50#define NV03_USER__SIZE 16
51#define NV10_USER__SIZE 32
52#define NV03_USER_SIZE 0x00010000
53#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
54#define NV03_USER_DMA_PUT__SIZE 16
55#define NV10_USER_DMA_PUT__SIZE 32
56#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE))
57#define NV03_USER_DMA_GET__SIZE 16
58#define NV10_USER_DMA_GET__SIZE 32
59#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE))
60#define NV03_USER_REF_CNT__SIZE 16
61#define NV10_USER_REF_CNT__SIZE 32
62
63#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE))
64#define NV40_USER_SIZE 0x00001000
65#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE))
66#define NV40_USER_DMA_PUT__SIZE 32
67#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE))
68#define NV40_USER_DMA_GET__SIZE 32
69#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE))
70#define NV40_USER_REF_CNT__SIZE 32
71
72#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE))
73#define NV50_USER_SIZE 0x00002000
74#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE))
75#define NV50_USER_DMA_PUT__SIZE 128
76#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE))
77#define NV50_USER_DMA_GET__SIZE 128
78#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE))
79#define NV50_USER_REF_CNT__SIZE 128
80
81#define NV03_FIFO_SIZE 0x8000UL
82
83#define NV03_PMC_BOOT_0 0x00000000
84#define NV03_PMC_BOOT_1 0x00000004
85#define NV03_PMC_INTR_0 0x00000100
86# define NV_PMC_INTR_0_PFIFO_PENDING (1<<8)
87# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
88# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
89# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
90# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
91# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
92# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
93#define NV03_PMC_INTR_EN_0 0x00000140
94# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<<0)
95#define NV03_PMC_ENABLE 0x00000200
96# define NV_PMC_ENABLE_PFIFO (1<<8)
97# define NV_PMC_ENABLE_PGRAPH (1<<12)
98/* Disabling the below bit breaks newer (G7X only?) mobile chipsets;
99 * the card will hang early in the X init process.
100 */
101# define NV_PMC_ENABLE_UNK13 (1<<13)
102#define NV40_PMC_BACKLIGHT 0x000015f0
103# define NV40_PMC_BACKLIGHT_MASK 0x001f0000
104#define NV40_PMC_1700 0x00001700
105#define NV40_PMC_1704 0x00001704
106#define NV40_PMC_1708 0x00001708
107#define NV40_PMC_170C 0x0000170C
108
109/* probably PMC ? */
110#define NV50_PUNK_BAR0_PRAMIN 0x00001700
111#define NV50_PUNK_BAR_CFG_BASE 0x00001704
112#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
113#define NV50_PUNK_BAR1_CTXDMA 0x00001708
114#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
115#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
116#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
117#define NV50_PUNK_UNK1710 0x00001710
118
119#define NV04_PBUS_PCI_NV_1 0x00001804
120#define NV04_PBUS_PCI_NV_19 0x0000184C
121#define NV04_PBUS_PCI_NV_20 0x00001850
122# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
123# define NV04_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
124
125#define NV04_PTIMER_INTR_0 0x00009100
126#define NV04_PTIMER_INTR_EN_0 0x00009140
127#define NV04_PTIMER_NUMERATOR 0x00009200
128#define NV04_PTIMER_DENOMINATOR 0x00009210
129#define NV04_PTIMER_TIME_0 0x00009400
130#define NV04_PTIMER_TIME_1 0x00009410
131#define NV04_PTIMER_ALARM_0 0x00009420
132
133#define NV04_PFB_CFG0 0x00100200
134#define NV04_PFB_CFG1 0x00100204
135#define NV40_PFB_020C 0x0010020C
136#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
137#define NV10_PFB_TILE__SIZE 8
138#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
139#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
140#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
141#define NV10_PFB_CLOSE_PAGE2 0x0010033C
142#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
143#define NV40_PFB_TILE__SIZE_0 12
144#define NV40_PFB_TILE__SIZE_1 15
145#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
146#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
147#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
148#define NV40_PFB_UNK_800 0x00100800
149
150#define NV04_PGRAPH_DEBUG_0 0x00400080
151#define NV04_PGRAPH_DEBUG_1 0x00400084
152#define NV04_PGRAPH_DEBUG_2 0x00400088
153#define NV04_PGRAPH_DEBUG_3 0x0040008c
154#define NV10_PGRAPH_DEBUG_4 0x00400090
155#define NV03_PGRAPH_INTR 0x00400100
156#define NV03_PGRAPH_NSTATUS 0x00400104
157# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
158# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
159# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
160# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
161# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
162# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
163# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
164# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
165#define NV03_PGRAPH_NSOURCE 0x00400108
166# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<<0)
167# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<<1)
168# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<<2)
169# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<<3)
170# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<<4)
171# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<<5)
172# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<<6)
173# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<<7)
174# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<<8)
175# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<<9)
176# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
177# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
178# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
179# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
180# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
181# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
182# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
183# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
184# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
185#define NV03_PGRAPH_INTR_EN 0x00400140
186#define NV40_PGRAPH_INTR_EN 0x0040013C
187# define NV_PGRAPH_INTR_NOTIFY (1<<0)
188# define NV_PGRAPH_INTR_MISSING_HW (1<<4)
189# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
190# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
191# define NV_PGRAPH_INTR_ERROR (1<<20)
192#define NV10_PGRAPH_CTX_CONTROL 0x00400144
193#define NV10_PGRAPH_CTX_USER 0x00400148
194#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
195#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
196#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
197#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
198#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
199#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
200#define NV10_PGRAPH_CTX_CACHE1 0x00400160
201#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
202#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
203#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
204#define NV04_PGRAPH_CTX_CONTROL 0x00400170
205#define NV04_PGRAPH_CTX_USER 0x00400174
206#define NV04_PGRAPH_CTX_CACHE1 0x00400180
207#define NV10_PGRAPH_CTX_CACHE2 0x00400180
208#define NV03_PGRAPH_CTX_CONTROL 0x00400190
209#define NV03_PGRAPH_CTX_USER 0x00400194
210#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
211#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
212#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
213#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
214#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
215#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
216#define NV40_PGRAPH_CTXCTL_0304 0x00400304
217#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
218#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
219#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
220#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
221#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
222#define NV40_PGRAPH_CTXCTL_0310 0x00400310
223#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
224#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
225#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
226#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
227#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
228#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
229#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
230#define NV40_PGRAPH_CTXCTL_CUR_INSTANCE 0x000FFFFF
231#define NV40_PGRAPH_CTXCTL_NEXT 0x00400330
232#define NV40_PGRAPH_CTXCTL_NEXT_INSTANCE 0x000fffff
233#define NV50_PGRAPH_CTXCTL_CUR 0x0040032c
234#define NV50_PGRAPH_CTXCTL_CUR_LOADED 0x80000000
235#define NV50_PGRAPH_CTXCTL_CUR_INSTANCE 0x00ffffff
236#define NV50_PGRAPH_CTXCTL_NEXT 0x00400330
237#define NV50_PGRAPH_CTXCTL_NEXT_INSTANCE 0x00ffffff
238#define NV03_PGRAPH_ABS_X_RAM 0x00400400
239#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
240#define NV03_PGRAPH_X_MISC 0x00400500
241#define NV03_PGRAPH_Y_MISC 0x00400504
242#define NV04_PGRAPH_VALID1 0x00400508
243#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
244#define NV04_PGRAPH_MISC24_0 0x00400510
245#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
246#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
247#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
248#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
249#define NV03_PGRAPH_CLIPX_0 0x00400524
250#define NV03_PGRAPH_CLIPX_1 0x00400528
251#define NV03_PGRAPH_CLIPY_0 0x0040052C
252#define NV03_PGRAPH_CLIPY_1 0x00400530
253#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
254#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
255#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
256#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
257#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
258#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
259#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
260#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
261#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
262#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
263#define NV04_PGRAPH_MISC24_1 0x00400570
264#define NV04_PGRAPH_MISC24_2 0x00400574
265#define NV04_PGRAPH_VALID2 0x00400578
266#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
267#define NV04_PGRAPH_PASSTHRU_1 0x00400580
268#define NV04_PGRAPH_PASSTHRU_2 0x00400584
269#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
270#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
271#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
272#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
273#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
274#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
275#define NV04_PGRAPH_FORMAT_0 0x004005A8
276#define NV04_PGRAPH_FORMAT_1 0x004005AC
277#define NV04_PGRAPH_FILTER_0 0x004005B0
278#define NV04_PGRAPH_FILTER_1 0x004005B4
279#define NV03_PGRAPH_MONO_COLOR0 0x00400600
280#define NV04_PGRAPH_ROP3 0x00400604
281#define NV04_PGRAPH_BETA_AND 0x00400608
282#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
283#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
284#define NV04_PGRAPH_FORMATS 0x00400618
285#define NV10_PGRAPH_DEBUG_2 0x00400620
286#define NV04_PGRAPH_BOFFSET0 0x00400640
287#define NV04_PGRAPH_BOFFSET1 0x00400644
288#define NV04_PGRAPH_BOFFSET2 0x00400648
289#define NV04_PGRAPH_BOFFSET3 0x0040064C
290#define NV04_PGRAPH_BOFFSET4 0x00400650
291#define NV04_PGRAPH_BOFFSET5 0x00400654
292#define NV04_PGRAPH_BBASE0 0x00400658
293#define NV04_PGRAPH_BBASE1 0x0040065C
294#define NV04_PGRAPH_BBASE2 0x00400660
295#define NV04_PGRAPH_BBASE3 0x00400664
296#define NV04_PGRAPH_BBASE4 0x00400668
297#define NV04_PGRAPH_BBASE5 0x0040066C
298#define NV04_PGRAPH_BPITCH0 0x00400670
299#define NV04_PGRAPH_BPITCH1 0x00400674
300#define NV04_PGRAPH_BPITCH2 0x00400678
301#define NV04_PGRAPH_BPITCH3 0x0040067C
302#define NV04_PGRAPH_BPITCH4 0x00400680
303#define NV04_PGRAPH_BLIMIT0 0x00400684
304#define NV04_PGRAPH_BLIMIT1 0x00400688
305#define NV04_PGRAPH_BLIMIT2 0x0040068C
306#define NV04_PGRAPH_BLIMIT3 0x00400690
307#define NV04_PGRAPH_BLIMIT4 0x00400694
308#define NV04_PGRAPH_BLIMIT5 0x00400698
309#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
310#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
311#define NV03_PGRAPH_STATUS 0x004006B0
312#define NV04_PGRAPH_STATUS 0x00400700
313#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
314#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
315#define NV04_PGRAPH_SURFACE 0x0040070C
316#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
317#define NV04_PGRAPH_STATE 0x00400710
318#define NV10_PGRAPH_SURFACE 0x00400710
319#define NV04_PGRAPH_NOTIFY 0x00400714
320#define NV10_PGRAPH_STATE 0x00400714
321#define NV10_PGRAPH_NOTIFY 0x00400718
322
323#define NV04_PGRAPH_FIFO 0x00400720
324
325#define NV04_PGRAPH_BPIXEL 0x00400724
326#define NV10_PGRAPH_RDI_INDEX 0x00400750
327#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
328#define NV10_PGRAPH_RDI_DATA 0x00400754
329#define NV04_PGRAPH_DMA_PITCH 0x00400760
330#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
331#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
332#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
333#define NV10_PGRAPH_DMA_PITCH 0x00400770
334#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
335#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
336#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
337#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
338#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
339#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
340#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
341#define NV04_PGRAPH_PATT_COLOR0 0x00400800
342#define NV04_PGRAPH_PATT_COLOR1 0x00400804
343#define NV04_PGRAPH_PATTERN 0x00400808
344#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
345#define NV04_PGRAPH_CHROMA 0x00400814
346#define NV04_PGRAPH_CONTROL0 0x00400818
347#define NV04_PGRAPH_CONTROL1 0x0040081C
348#define NV04_PGRAPH_CONTROL2 0x00400820
349#define NV04_PGRAPH_BLEND 0x00400824
350#define NV04_PGRAPH_STORED_FMT 0x00400830
351#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
352#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
353#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
354#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
355#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
356#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
357#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
358#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
359#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
360#define NV04_PGRAPH_U_RAM 0x00400D00
361#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
362#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
363#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
364#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
365#define NV04_PGRAPH_V_RAM 0x00400D40
366#define NV04_PGRAPH_W_RAM 0x00400D80
367#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
368#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
369#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
370#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
371#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
372#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
373#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
374#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
375#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
376#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
377#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
378#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
379#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
380#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
381#define NV10_PGRAPH_XFMODE0 0x00400F40
382#define NV10_PGRAPH_XFMODE1 0x00400F44
383#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
384#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
385#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
386#define NV10_PGRAPH_PIPE_DATA 0x00400F54
387#define NV04_PGRAPH_DMA_START_0 0x00401000
388#define NV04_PGRAPH_DMA_START_1 0x00401004
389#define NV04_PGRAPH_DMA_LENGTH 0x00401008
390#define NV04_PGRAPH_DMA_MISC 0x0040100C
391#define NV04_PGRAPH_DMA_DATA_0 0x00401020
392#define NV04_PGRAPH_DMA_DATA_1 0x00401024
393#define NV04_PGRAPH_DMA_RM 0x00401030
394#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
395#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
396#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
397#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
398#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
399#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
400#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
401#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
402#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
403#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
404#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
405#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
406#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
407#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
408#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
409#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
410#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
411#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
412#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
413#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
414#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
415#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
416
417
418/* It's a guess that this works on NV03. Confirmed on NV04, though */
419#define NV04_PFIFO_DELAY_0 0x00002040
420#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
421#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
422#define NV03_PFIFO_INTR_0 0x00002100
423#define NV03_PFIFO_INTR_EN_0 0x00002140
424# define NV_PFIFO_INTR_CACHE_ERROR (1<<0)
425# define NV_PFIFO_INTR_RUNOUT (1<<4)
426# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<<8)
427# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
428# define NV_PFIFO_INTR_DMA_PT (1<<16)
429# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
430# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
431#define NV03_PFIFO_RAMHT 0x00002210
432#define NV03_PFIFO_RAMFC 0x00002214
433#define NV03_PFIFO_RAMRO 0x00002218
434#define NV40_PFIFO_RAMFC 0x00002220
435#define NV03_PFIFO_CACHES 0x00002500
436#define NV04_PFIFO_MODE 0x00002504
437#define NV04_PFIFO_DMA 0x00002508
438#define NV04_PFIFO_SIZE 0x0000250c
439#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
440#define NV50_PFIFO_CTX_TABLE__SIZE 128
441#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
442#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
443#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
444#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
445#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
446#define NV03_PFIFO_CACHE0_PULL0 0x00003040
447#define NV04_PFIFO_CACHE0_PULL0 0x00003050
448#define NV04_PFIFO_CACHE0_PULL1 0x00003054
449#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
450#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
451#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
452#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
453#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
454#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
455#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
456#define NV03_PFIFO_CACHE1_PUT 0x00003210
457#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
458#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
459# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
460# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
461# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
462# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
463# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
464# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
465# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
466# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
467# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
468# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
469# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
470# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
471# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
472# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
473# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
474# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
475# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
476# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
477# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
478# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
479# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
480# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
481# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
482# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
483# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
484# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
485# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
486# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
487# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
488# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
489# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
490# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
491# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
492# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
493# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
494# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
495# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
496# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
497# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
498# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
499# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
500# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
501# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
502# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
503# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
504# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
505# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
506# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
507# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
508# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
509# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
510# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
511# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
512# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
513# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
514# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
515# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
516# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
517# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
518# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
519# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
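/* A pattern worth noting, derived purely from the values above: TRIG
 * encodes (bytes - 8), SIZE encodes ((bytes / 32) - 1) << 13, and
 * MAX_REQS encodes the request count << 16. For example, the 128-byte
 * trigger is 128 - 8 = 0x78 and the 64-byte fetch size is
 * ((64 / 32) - 1) << 13 = 0x2000, matching the defines above.
 */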
520#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
521#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
522#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
523#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
524#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
525#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
526#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
527#define NV03_PFIFO_CACHE1_PULL0 0x00003240
528#define NV04_PFIFO_CACHE1_PULL0 0x00003250
529#define NV03_PFIFO_CACHE1_PULL1 0x00003250
530#define NV04_PFIFO_CACHE1_PULL1 0x00003254
531#define NV04_PFIFO_CACHE1_HASH 0x00003258
532#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
533#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
534#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
535#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
536#define NV03_PFIFO_CACHE1_GET 0x00003270
537#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
538#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
539#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
540#define NV40_PFIFO_UNK32E4 0x000032E4
541#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
542#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
543#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
544#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
545
546#define NV_CRTC0_INTSTAT 0x00600100
547#define NV_CRTC0_INTEN 0x00600140
548#define NV_CRTC1_INTSTAT 0x00602100
549#define NV_CRTC1_INTEN 0x00602140
550# define NV_CRTC_INTR_VBLANK (1<<0)
551
552#define NV04_PRAMIN 0x00700000
553
554/* FIFO commands. These are neither registers nor masks */
555#define NV03_FIFO_CMD_JUMP 0x20000000
556#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
557#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
558
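/* A small illustration (not driver code) of how these compose: a jump
 * command is the command bit OR'd with the masked target offset,
 *
 *	uint32_t jump = NV03_FIFO_CMD_JUMP |
 *			(offset & NV03_FIFO_CMD_JUMP_OFFSET_MASK);
 *
 * so NV03_FIFO_CMD_REWIND above is simply a jump with offset zero.
 */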
559/* This is a partial import from rules-ng, a few things may be duplicated.
560 * Eventually we should completely import everything from rules-ng.
561 * For the moment check rules-ng for docs.
562 */
563
564#define NV50_PMC 0x00000000
565#define NV50_PMC__LEN 0x1
566#define NV50_PMC__ESIZE 0x2000
567# define NV50_PMC_BOOT_0 0x00000000
568# define NV50_PMC_BOOT_0_REVISION 0x000000ff
569# define NV50_PMC_BOOT_0_REVISION__SHIFT 0
570# define NV50_PMC_BOOT_0_ARCH 0x0ff00000
571# define NV50_PMC_BOOT_0_ARCH__SHIFT 20
572# define NV50_PMC_INTR_0 0x00000100
573# define NV50_PMC_INTR_0_PFIFO (1<<8)
574# define NV50_PMC_INTR_0_PGRAPH (1<<12)
575# define NV50_PMC_INTR_0_PTIMER (1<<20)
576# define NV50_PMC_INTR_0_HOTPLUG (1<<21)
577# define NV50_PMC_INTR_0_DISPLAY (1<<26)
578# define NV50_PMC_INTR_EN_0 0x00000140
579# define NV50_PMC_INTR_EN_0_MASTER (1<<0)
580# define NV50_PMC_INTR_EN_0_MASTER_DISABLED (0<<0)
581# define NV50_PMC_INTR_EN_0_MASTER_ENABLED (1<<0)
582# define NV50_PMC_ENABLE 0x00000200
583# define NV50_PMC_ENABLE_PFIFO (1<<8)
584# define NV50_PMC_ENABLE_PGRAPH (1<<12)
585
586#define NV50_PCONNECTOR 0x0000e000
587#define NV50_PCONNECTOR__LEN 0x1
588#define NV50_PCONNECTOR__ESIZE 0x1000
589# define NV50_PCONNECTOR_HOTPLUG_INTR 0x0000e050
590# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C0 (1<<0)
591# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C1 (1<<1)
592# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C2 (1<<2)
593# define NV50_PCONNECTOR_HOTPLUG_INTR_PLUG_I2C3 (1<<3)
594# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C0 (1<<16)
595# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C1 (1<<17)
596# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C2 (1<<18)
597# define NV50_PCONNECTOR_HOTPLUG_INTR_UNPLUG_I2C3 (1<<19)
598# define NV50_PCONNECTOR_HOTPLUG_CTRL 0x0000e054
599# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C0 (1<<0)
600# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C1 (1<<1)
601# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C2 (1<<2)
602# define NV50_PCONNECTOR_HOTPLUG_CTRL_PLUG_I2C3 (1<<3)
603# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C0 (1<<16)
604# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C1 (1<<17)
605# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C2 (1<<18)
606# define NV50_PCONNECTOR_HOTPLUG_CTRL_UNPLUG_I2C3 (1<<19)
607# define NV50_PCONNECTOR_HOTPLUG_STATE 0x0000e104
608# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C0 (1<<2)
609# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C1 (1<<6)
610# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C2 (1<<10)
611# define NV50_PCONNECTOR_HOTPLUG_STATE_PIN_CONNECTED_I2C3 (1<<14)
612# define NV50_PCONNECTOR_I2C_PORT_0 0x0000e138
613# define NV50_PCONNECTOR_I2C_PORT_1 0x0000e150
614# define NV50_PCONNECTOR_I2C_PORT_2 0x0000e168
615# define NV50_PCONNECTOR_I2C_PORT_3 0x0000e180
616# define NV50_PCONNECTOR_I2C_PORT_4 0x0000e240
617# define NV50_PCONNECTOR_I2C_PORT_5 0x0000e258
618
619#define NV50_AUXCH_DATA_OUT(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4c0)
620#define NV50_AUXCH_DATA_OUT__SIZE 4
621#define NV50_AUXCH_DATA_IN(i,n) ((n) * 4 + (i) * 0x50 + 0x0000e4d0)
622#define NV50_AUXCH_DATA_IN__SIZE 4
623#define NV50_AUXCH_ADDR(i) ((i) * 0x50 + 0x0000e4e0)
624#define NV50_AUXCH_CTRL(i) ((i) * 0x50 + 0x0000e4e4)
625#define NV50_AUXCH_CTRL_LINKSTAT 0x01000000
626#define NV50_AUXCH_CTRL_LINKSTAT_NOT_READY 0x00000000
627#define NV50_AUXCH_CTRL_LINKSTAT_READY 0x01000000
628#define NV50_AUXCH_CTRL_LINKEN 0x00100000
629#define NV50_AUXCH_CTRL_LINKEN_DISABLED 0x00000000
630#define NV50_AUXCH_CTRL_LINKEN_ENABLED 0x00100000
631#define NV50_AUXCH_CTRL_EXEC 0x00010000
632#define NV50_AUXCH_CTRL_EXEC_COMPLETE 0x00000000
633#define NV50_AUXCH_CTRL_EXEC_IN_PROCESS 0x00010000
634#define NV50_AUXCH_CTRL_CMD 0x0000f000
635#define NV50_AUXCH_CTRL_CMD_SHIFT 12
636#define NV50_AUXCH_CTRL_LEN 0x0000000f
637#define NV50_AUXCH_CTRL_LEN_SHIFT 0
638#define NV50_AUXCH_STAT(i) ((i) * 0x50 + 0x0000e4e8)
639#define NV50_AUXCH_STAT_STATE 0x10000000
640#define NV50_AUXCH_STAT_STATE_NOT_READY 0x00000000
641#define NV50_AUXCH_STAT_STATE_READY 0x10000000
642#define NV50_AUXCH_STAT_REPLY 0x000f0000
643#define NV50_AUXCH_STAT_REPLY_AUX 0x00030000
644#define NV50_AUXCH_STAT_REPLY_AUX_ACK 0x00000000
645#define NV50_AUXCH_STAT_REPLY_AUX_NACK 0x00010000
646#define NV50_AUXCH_STAT_REPLY_AUX_DEFER 0x00020000
647#define NV50_AUXCH_STAT_REPLY_I2C 0x000c0000
648#define NV50_AUXCH_STAT_REPLY_I2C_ACK 0x00000000
649#define NV50_AUXCH_STAT_REPLY_I2C_NACK 0x00040000
650#define NV50_AUXCH_STAT_REPLY_I2C_DEFER 0x00080000
651#define NV50_AUXCH_STAT_COUNT 0x0000001f
652
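/* A hedged sketch of driving an AUX transaction with the fields above;
 * 'index', 'cmd' and 'len' are hypothetical locals, and the exact
 * encoding of the length field is an assumption:
 *
 *	uint32_t ctrl = nv_rd32(dev, NV50_AUXCH_CTRL(index));
 *	ctrl &= ~(NV50_AUXCH_CTRL_CMD | NV50_AUXCH_CTRL_LEN);
 *	ctrl |= (cmd << NV50_AUXCH_CTRL_CMD_SHIFT) |
 *		(len << NV50_AUXCH_CTRL_LEN_SHIFT) |
 *		NV50_AUXCH_CTRL_EXEC;
 *	nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
 *
 * Completion would then show as CTRL_EXEC reading back as
 * NV50_AUXCH_CTRL_EXEC_COMPLETE, with the result in AUXCH_STAT_REPLY.
 */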
653#define NV50_PBUS 0x00088000
654#define NV50_PBUS__LEN 0x1
655#define NV50_PBUS__ESIZE 0x1000
656# define NV50_PBUS_PCI_ID 0x00088000
657# define NV50_PBUS_PCI_ID_VENDOR_ID 0x0000ffff
658# define NV50_PBUS_PCI_ID_VENDOR_ID__SHIFT 0
659# define NV50_PBUS_PCI_ID_DEVICE_ID 0xffff0000
660# define NV50_PBUS_PCI_ID_DEVICE_ID__SHIFT 16
661
662#define NV50_PFB 0x00100000
663#define NV50_PFB__LEN 0x1
664#define NV50_PFB__ESIZE 0x1000
665
666#define NV50_PEXTDEV 0x00101000
667#define NV50_PEXTDEV__LEN 0x1
668#define NV50_PEXTDEV__ESIZE 0x1000
669
670#define NV50_PROM 0x00300000
671#define NV50_PROM__LEN 0x1
672#define NV50_PROM__ESIZE 0x10000
673
674#define NV50_PGRAPH 0x00400000
675#define NV50_PGRAPH__LEN 0x1
676#define NV50_PGRAPH__ESIZE 0x10000
677
678#define NV50_PDISPLAY 0x00610000
679#define NV50_PDISPLAY_OBJECTS 0x00610010
680#define NV50_PDISPLAY_INTR_0 0x00610020
681#define NV50_PDISPLAY_INTR_1 0x00610024
682#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC 0x0000000c
683#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_SHIFT 2
684#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(n) (1 << ((n) + 2))
685#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0 0x00000004
686#define NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1 0x00000008
687#define NV50_PDISPLAY_INTR_1_CLK_UNK10 0x00000010
688#define NV50_PDISPLAY_INTR_1_CLK_UNK20 0x00000020
689#define NV50_PDISPLAY_INTR_1_CLK_UNK40 0x00000040
690#define NV50_PDISPLAY_INTR_EN 0x0061002c
691#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC 0x0000000c
692#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(n) (1 << ((n) + 2))
693#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_0 0x00000004
694#define NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_1 0x00000008
695#define NV50_PDISPLAY_INTR_EN_CLK_UNK10 0x00000010
696#define NV50_PDISPLAY_INTR_EN_CLK_UNK20 0x00000020
697#define NV50_PDISPLAY_INTR_EN_CLK_UNK40 0x00000040
698#define NV50_PDISPLAY_UNK30_CTRL 0x00610030
699#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK0 0x00000200
700#define NV50_PDISPLAY_UNK30_CTRL_UPDATE_VCLK1 0x00000400
701#define NV50_PDISPLAY_UNK30_CTRL_PENDING 0x80000000
702#define NV50_PDISPLAY_TRAPPED_ADDR 0x00610080
703#define NV50_PDISPLAY_TRAPPED_DATA 0x00610084
704#define NV50_PDISPLAY_CHANNEL_STAT(i) ((i) * 0x10 + 0x00610200)
705#define NV50_PDISPLAY_CHANNEL_STAT_DMA 0x00000010
706#define NV50_PDISPLAY_CHANNEL_STAT_DMA_DISABLED 0x00000000
707#define NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED 0x00000010
708#define NV50_PDISPLAY_CHANNEL_DMA_CB(i) ((i) * 0x10 + 0x00610204)
709#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION 0x00000002
710#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM 0x00000000
711#define NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_SYSTEM 0x00000002
712#define NV50_PDISPLAY_CHANNEL_DMA_CB_VALID 0x00000001
713#define NV50_PDISPLAY_CHANNEL_UNK2(i) ((i) * 0x10 + 0x00610208)
714#define NV50_PDISPLAY_CHANNEL_UNK3(i) ((i) * 0x10 + 0x0061020c)
715
716#define NV50_PDISPLAY_CURSOR 0x00610270
717#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i) ((i) * 0x10 + 0x00610270)
718#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON 0x00000001
719#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS 0x00030000
720#define NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE 0x00010000
721
722#define NV50_PDISPLAY_CTRL_STATE 0x00610300
723#define NV50_PDISPLAY_CTRL_STATE_PENDING 0x80000000
724#define NV50_PDISPLAY_CTRL_STATE_METHOD 0x00001ffc
725#define NV50_PDISPLAY_CTRL_STATE_ENABLE 0x00000001
726#define NV50_PDISPLAY_CTRL_VAL 0x00610304
727#define NV50_PDISPLAY_UNK_380 0x00610380
728#define NV50_PDISPLAY_RAM_AMOUNT 0x00610384
729#define NV50_PDISPLAY_UNK_388 0x00610388
730#define NV50_PDISPLAY_UNK_38C 0x0061038c
731
732#define NV50_PDISPLAY_CRTC_P(i, r) ((i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
733#define NV50_PDISPLAY_CRTC_C(i, r) (4 + (i) * 0x540 + NV50_PDISPLAY_CRTC_##r)
734#define NV50_PDISPLAY_CRTC_UNK_0A18 /* mthd 0x0900 */ 0x00610a18
735#define NV50_PDISPLAY_CRTC_CLUT_MODE 0x00610a24
736#define NV50_PDISPLAY_CRTC_INTERLACE 0x00610a48
737#define NV50_PDISPLAY_CRTC_SCALE_CTRL 0x00610a50
738#define NV50_PDISPLAY_CRTC_CURSOR_CTRL 0x00610a58
739#define NV50_PDISPLAY_CRTC_UNK0A78 /* mthd 0x0904 */ 0x00610a78
740#define NV50_PDISPLAY_CRTC_UNK0AB8 0x00610ab8
741#define NV50_PDISPLAY_CRTC_DEPTH 0x00610ac8
742#define NV50_PDISPLAY_CRTC_CLOCK 0x00610ad0
743#define NV50_PDISPLAY_CRTC_COLOR_CTRL 0x00610ae0
744#define NV50_PDISPLAY_CRTC_SYNC_START_TO_BLANK_END 0x00610ae8
745#define NV50_PDISPLAY_CRTC_MODE_UNK1 0x00610af0
746#define NV50_PDISPLAY_CRTC_DISPLAY_TOTAL 0x00610af8
747#define NV50_PDISPLAY_CRTC_SYNC_DURATION 0x00610b00
748#define NV50_PDISPLAY_CRTC_MODE_UNK2 0x00610b08
749#define NV50_PDISPLAY_CRTC_UNK_0B10 /* mthd 0x0828 */ 0x00610b10
750#define NV50_PDISPLAY_CRTC_FB_SIZE 0x00610b18
751#define NV50_PDISPLAY_CRTC_FB_PITCH 0x00610b20
752#define NV50_PDISPLAY_CRTC_FB_PITCH_LINEAR 0x00100000
753#define NV50_PDISPLAY_CRTC_FB_POS 0x00610b28
754#define NV50_PDISPLAY_CRTC_SCALE_CENTER_OFFSET 0x00610b38
755#define NV50_PDISPLAY_CRTC_REAL_RES 0x00610b40
756#define NV50_PDISPLAY_CRTC_SCALE_RES1 0x00610b48
757#define NV50_PDISPLAY_CRTC_SCALE_RES2 0x00610b50
758
759#define NV50_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
760#define NV50_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
761#define NV50_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610b70 + (i) * 0x8)
762#define NV50_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610b74 + (i) * 0x8)
763#define NV50_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610bdc + (i) * 0x8)
764#define NV50_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610be0 + (i) * 0x8)
765
766#define NV90_PDISPLAY_SOR_MODE_CTRL_P(i) (0x00610794 + (i) * 0x8)
767#define NV90_PDISPLAY_SOR_MODE_CTRL_C(i) (0x00610798 + (i) * 0x8)
768#define NV90_PDISPLAY_DAC_MODE_CTRL_P(i) (0x00610b58 + (i) * 0x8)
769#define NV90_PDISPLAY_DAC_MODE_CTRL_C(i) (0x00610b5c + (i) * 0x8)
770#define NV90_PDISPLAY_DAC_MODE_CTRL2_P(i) (0x00610b80 + (i) * 0x8)
771#define NV90_PDISPLAY_DAC_MODE_CTRL2_C(i) (0x00610b84 + (i) * 0x8)
772
773#define NV50_PDISPLAY_CRTC_CLK 0x00614000
774#define NV50_PDISPLAY_CRTC_CLK_CTRL1(i) ((i) * 0x800 + 0x614100)
775#define NV50_PDISPLAY_CRTC_CLK_CTRL1_CONNECTED 0x00000600
776#define NV50_PDISPLAY_CRTC_CLK_VPLL_A(i) ((i) * 0x800 + 0x614104)
777#define NV50_PDISPLAY_CRTC_CLK_VPLL_B(i) ((i) * 0x800 + 0x614108)
778#define NV50_PDISPLAY_CRTC_CLK_CTRL2(i) ((i) * 0x800 + 0x614200)
779
780#define NV50_PDISPLAY_DAC_CLK 0x00614000
781#define NV50_PDISPLAY_DAC_CLK_CTRL2(i) ((i) * 0x800 + 0x614280)
782
783#define NV50_PDISPLAY_SOR_CLK 0x00614000
784#define NV50_PDISPLAY_SOR_CLK_CTRL2(i) ((i) * 0x800 + 0x614300)
785
786#define NV50_PDISPLAY_VGACRTC(r) ((r) + 0x619400)
787
788#define NV50_PDISPLAY_DAC 0x0061a000
789#define NV50_PDISPLAY_DAC_DPMS_CTRL(i) (0x0061a004 + (i) * 0x800)
790#define NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF 0x00000001
791#define NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF 0x00000004
792#define NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED 0x00000010
793#define NV50_PDISPLAY_DAC_DPMS_CTRL_OFF 0x00000040
794#define NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING 0x80000000
795#define NV50_PDISPLAY_DAC_LOAD_CTRL(i) (0x0061a00c + (i) * 0x800)
796#define NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE 0x00100000
797#define NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT 0x38000000
798#define NV50_PDISPLAY_DAC_LOAD_CTRL_DONE 0x80000000
799#define NV50_PDISPLAY_DAC_CLK_CTRL1(i) (0x0061a010 + (i) * 0x800)
800#define NV50_PDISPLAY_DAC_CLK_CTRL1_CONNECTED 0x00000600
801
802#define NV50_PDISPLAY_SOR 0x0061c000
803#define NV50_PDISPLAY_SOR_DPMS_CTRL(i) (0x0061c004 + (i) * 0x800)
804#define NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING 0x80000000
805#define NV50_PDISPLAY_SOR_DPMS_CTRL_ON 0x00000001
806#define NV50_PDISPLAY_SOR_CLK_CTRL1(i) (0x0061c008 + (i) * 0x800)
807#define NV50_PDISPLAY_SOR_CLK_CTRL1_CONNECTED 0x00000600
808#define NV50_PDISPLAY_SOR_DPMS_STATE(i) (0x0061c030 + (i) * 0x800)
809#define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE 0x00030000
810#define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED 0x00080000
811#define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT 0x10000000
812#define NV50_PDISPLAY_SOR_BACKLIGHT 0x0061c084
813#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
814#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
815#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
816#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
817#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
818#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
819#define NV50_SOR_DP_CTRL_LANE_1_ENABLED 0x00020000
820#define NV50_SOR_DP_CTRL_LANE_2_ENABLED 0x00040000
821#define NV50_SOR_DP_CTRL_LANE_3_ENABLED 0x00080000
822#define NV50_SOR_DP_CTRL_TRAINING_PATTERN 0x0f000000
823#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_DISABLED 0x00000000
824#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_1 0x01000000
825#define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2 0x02000000
826#define NV50_SOR_DP_UNK118(i,l) (0x0061c118 + (i) * 0x800 + (l) * 0x80)
827#define NV50_SOR_DP_UNK120(i,l) (0x0061c120 + (i) * 0x800 + (l) * 0x80)
828#define NV50_SOR_DP_UNK130(i,l) (0x0061c130 + (i) * 0x800 + (l) * 0x80)
829
830#define NV50_PDISPLAY_USER(i) ((i) * 0x1000 + 0x00640000)
831#define NV50_PDISPLAY_USER_PUT(i) ((i) * 0x1000 + 0x00640000)
832#define NV50_PDISPLAY_USER_GET(i) ((i) * 0x1000 + 0x00640004)
833
834#define NV50_PDISPLAY_CURSOR_USER 0x00647000
835#define NV50_PDISPLAY_CURSOR_USER_POS_CTRL(i) ((i) * 0x1000 + 0x00647080)
836#define NV50_PDISPLAY_CURSOR_USER_POS(i) ((i) * 0x1000 + 0x00647084)
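The paired mask/__SHIFT defines above encode the usual extract-a-field idiom. A minimal sketch, assuming the nv_rd32() register accessor used in the driver sources below (dev being a struct drm_device *):

	uint32_t boot0 = nv_rd32(dev, NV50_PMC_BOOT_0);
	uint32_t arch  = (boot0 & NV50_PMC_BOOT_0_ARCH) >>
			 NV50_PMC_BOOT_0_ARCH__SHIFT;
	uint32_t rev   = (boot0 & NV50_PMC_BOOT_0_REVISION) >>
			 NV50_PMC_BOOT_0_REVISION__SHIFT;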
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 000000000000..4c7f1e403e80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,321 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include <linux/pagemap.h>
4
5#define NV_CTXDMA_PAGE_SHIFT 12
6#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
7#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
8
9struct nouveau_sgdma_be {
10 struct ttm_backend backend;
11 struct drm_device *dev;
12
13 dma_addr_t *pages;
14 unsigned nr_pages;
15
16 unsigned pte_start;
17 bool bound;
18};
19
20static int
21nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
22 struct page **pages, struct page *dummy_read_page)
23{
24 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
25 struct drm_device *dev = nvbe->dev;
26
27 NV_DEBUG(nvbe->dev, "num_pages = %lu\n", num_pages);
28
29 if (nvbe->pages)
30 return -EINVAL;
31
32 nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
33 if (!nvbe->pages)
34 return -ENOMEM;
35
36 nvbe->nr_pages = 0;
37 while (num_pages--) {
38 nvbe->pages[nvbe->nr_pages] =
39 pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
40 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
41 if (pci_dma_mapping_error(dev->pdev,
42 nvbe->pages[nvbe->nr_pages])) {
43 be->func->clear(be);
44 return -EFAULT;
45 }
46
47 nvbe->nr_pages++;
48 }
49
50 return 0;
51}
52
53static void
54nouveau_sgdma_clear(struct ttm_backend *be)
55{
56 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
57 struct drm_device *dev = nvbe->dev;
58
59 NV_DEBUG(nvbe->dev, "\n");
60
61 if (nvbe && nvbe->pages) {
62 if (nvbe->bound)
63 be->func->unbind(be);
64
65 while (nvbe->nr_pages--) {
66 pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
67 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
68 }
69 kfree(nvbe->pages);
70 nvbe->pages = NULL;
71 nvbe->nr_pages = 0;
72 }
73}
74
75static inline unsigned
76nouveau_sgdma_pte(struct drm_device *dev, uint64_t offset)
77{
78 struct drm_nouveau_private *dev_priv = dev->dev_private;
79 unsigned pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
80
81 if (dev_priv->card_type < NV_50)
82 return pte + 2;
83
84 return pte << 1;
85}
86
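/* Worked example: an offset of 0x3000 gives pte = 3. Pre-NV50 cards add
 * 2 to skip the two-word ctxdma header (index 5); NV50 shifts left by 1
 * because each GART entry there occupies two 32-bit words (index 6).
 */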
87static int
88nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
89{
90 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
91 struct drm_device *dev = nvbe->dev;
92 struct drm_nouveau_private *dev_priv = dev->dev_private;
93 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
94 unsigned i, j, pte;
95
96 NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
97
98 dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
99 pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
100 nvbe->pte_start = pte;
101 for (i = 0; i < nvbe->nr_pages; i++) {
102 dma_addr_t dma_offset = nvbe->pages[i];
103 uint32_t offset_l = lower_32_bits(dma_offset);
104 uint32_t offset_h = upper_32_bits(dma_offset);
105
106 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
107 if (dev_priv->card_type < NV_50)
108 nv_wo32(dev, gpuobj, pte++, offset_l | 3);
109 else {
110 nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
111 nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
112 }
113
114 dma_offset += NV_CTXDMA_PAGE_SIZE;
115 }
116 }
117 dev_priv->engine.instmem.finish_access(nvbe->dev);
118
119 if (dev_priv->card_type == NV_50) {
120 nv_wr32(dev, 0x100c80, 0x00050001);
121 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
122 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
123 NV_ERROR(dev, "0x100c80 = 0x%08x\n",
124 nv_rd32(dev, 0x100c80));
125 return -EBUSY;
126 }
127
128 nv_wr32(dev, 0x100c80, 0x00000001);
129 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
130 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
131 NV_ERROR(dev, "0x100c80 = 0x%08x\n",
132 nv_rd32(dev, 0x100c80));
133 return -EBUSY;
134 }
135 }
136
137 nvbe->bound = true;
138 return 0;
139}
140
141static int
142nouveau_sgdma_unbind(struct ttm_backend *be)
143{
144 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
145 struct drm_device *dev = nvbe->dev;
146 struct drm_nouveau_private *dev_priv = dev->dev_private;
147 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
148 unsigned i, j, pte;
149
150 NV_DEBUG(dev, "\n");
151
152 if (!nvbe->bound)
153 return 0;
154
155 dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
156 pte = nvbe->pte_start;
157 for (i = 0; i < nvbe->nr_pages; i++) {
158 dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
159
160 for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
161 if (dev_priv->card_type < NV_50)
162 nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
163 else {
164 nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
165 nv_wo32(dev, gpuobj, pte++, 0x00000000);
166 }
167
168 dma_offset += NV_CTXDMA_PAGE_SIZE;
169 }
170 }
171 dev_priv->engine.instmem.finish_access(nvbe->dev);
172
173 nvbe->bound = false;
174 return 0;
175}
176
177static void
178nouveau_sgdma_destroy(struct ttm_backend *be)
179{
180 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
181
182 if (be) {
183 NV_DEBUG(nvbe->dev, "\n");
184
185 if (nvbe) {
186 if (nvbe->pages)
187 be->func->clear(be);
188 kfree(nvbe);
189 }
190 }
191}
192
193static struct ttm_backend_func nouveau_sgdma_backend = {
194 .populate = nouveau_sgdma_populate,
195 .clear = nouveau_sgdma_clear,
196 .bind = nouveau_sgdma_bind,
197 .unbind = nouveau_sgdma_unbind,
198 .destroy = nouveau_sgdma_destroy
199};
200
201struct ttm_backend *
202nouveau_sgdma_init_ttm(struct drm_device *dev)
203{
204 struct drm_nouveau_private *dev_priv = dev->dev_private;
205 struct nouveau_sgdma_be *nvbe;
206
207 if (!dev_priv->gart_info.sg_ctxdma)
208 return NULL;
209
210 nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
211 if (!nvbe)
212 return NULL;
213
214 nvbe->dev = dev;
215
216 nvbe->backend.func = &nouveau_sgdma_backend;
217
218 return &nvbe->backend;
219}
220
221int
222nouveau_sgdma_init(struct drm_device *dev)
223{
224 struct drm_nouveau_private *dev_priv = dev->dev_private;
225 struct nouveau_gpuobj *gpuobj = NULL;
226 uint32_t aper_size, obj_size;
227 int i, ret;
228
229 if (dev_priv->card_type < NV_50) {
230 aper_size = (64 * 1024 * 1024);
231 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
232 obj_size += 8; /* ctxdma header */
233 } else {
234 /* 1 entire VM page table */
235 aper_size = (512 * 1024 * 1024);
236 obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
237 }
238
239 ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
240 NVOBJ_FLAG_ALLOW_NO_REFS |
241 NVOBJ_FLAG_ZERO_ALLOC |
242 NVOBJ_FLAG_ZERO_FREE, &gpuobj);
243 if (ret) {
244 NV_ERROR(dev, "Error creating sgdma object: %d\n", ret);
245 return ret;
246 }
247
248 dev_priv->gart_info.sg_dummy_page =
249 alloc_page(GFP_KERNEL|__GFP_DMA32);
250 set_bit(PG_locked, &dev_priv->gart_info.sg_dummy_page->flags);
251 dev_priv->gart_info.sg_dummy_bus =
252 pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
253 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
254
255 dev_priv->engine.instmem.prepare_access(dev, true);
256 if (dev_priv->card_type < NV_50) {
257 /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA does this, and it's
258 * confirmed to work on c51. Perhaps that means NV_DMA_TARGET_PCIE
259 * on those cards? */
260 nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
261 (1 << 12) /* PT present */ |
262 (0 << 13) /* PT *not* linear */ |
263 (NV_DMA_ACCESS_RW << 14) |
264 (NV_DMA_TARGET_PCI << 16));
265 nv_wo32(dev, gpuobj, 1, aper_size - 1);
266 for (i = 2; i < 2 + (aper_size >> 12); i++) {
267 nv_wo32(dev, gpuobj, i,
268 dev_priv->gart_info.sg_dummy_bus | 3);
269 }
270 } else {
271 for (i = 0; i < obj_size; i += 8) {
272 nv_wo32(dev, gpuobj, (i+0)/4,
273 dev_priv->gart_info.sg_dummy_bus | 0x21);
274 nv_wo32(dev, gpuobj, (i+4)/4, 0);
275 }
276 }
277 dev_priv->engine.instmem.finish_access(dev);
278
279 dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
280 dev_priv->gart_info.aper_base = 0;
281 dev_priv->gart_info.aper_size = aper_size;
282 dev_priv->gart_info.sg_ctxdma = gpuobj;
283 return 0;
284}
285
286void
287nouveau_sgdma_takedown(struct drm_device *dev)
288{
289 struct drm_nouveau_private *dev_priv = dev->dev_private;
290
291 if (dev_priv->gart_info.sg_dummy_page) {
292 pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
293 NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
294 unlock_page(dev_priv->gart_info.sg_dummy_page);
295 __free_page(dev_priv->gart_info.sg_dummy_page);
296 dev_priv->gart_info.sg_dummy_page = NULL;
297 dev_priv->gart_info.sg_dummy_bus = 0;
298 }
299
300 nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
301}
302
303int
304nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
305{
306 struct drm_nouveau_private *dev_priv = dev->dev_private;
307 struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
308 struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
309 int pte;
310
311 pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
312 if (dev_priv->card_type < NV_50) {
313 instmem->prepare_access(dev, false);
314 *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
315 instmem->finish_access(dev);
316 return 0;
317 }
318
319 NV_ERROR(dev, "Unimplemented on NV50\n");
320 return -EINVAL;
321}
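For reference, the two ctxdma header words that nouveau_sgdma_init() writes on pre-NV50 cards decompose as below; this is the code above re-expressed with the 64MiB aperture plugged in, purely as an illustration:

	uint32_t w0 = NV_CLASS_DMA_IN_MEMORY |
		      (1 << 12) |		/* page table present */
		      (0 << 13) |		/* page table not linear */
		      (NV_DMA_ACCESS_RW << 14) |
		      (NV_DMA_TARGET_PCI << 16);
	uint32_t w1 = (64 * 1024 * 1024) - 1;	/* limit: 0x03ffffff */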
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 000000000000..2ed41d339f6a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,811 @@
1/*
2 * Copyright 2005 Stephane Marchesin
3 * Copyright 2008 Stuart Bennett
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include <linux/swab.h>
27#include "drmP.h"
28#include "drm.h"
29#include "drm_sarea.h"
30#include "drm_crtc_helper.h"
31#include <linux/vgaarb.h>
32
33#include "nouveau_drv.h"
34#include "nouveau_drm.h"
35#include "nv50_display.h"
36
37static int nouveau_stub_init(struct drm_device *dev) { return 0; }
38static void nouveau_stub_takedown(struct drm_device *dev) {}
39
40static int nouveau_init_engine_ptrs(struct drm_device *dev)
41{
42 struct drm_nouveau_private *dev_priv = dev->dev_private;
43 struct nouveau_engine *engine = &dev_priv->engine;
44
45 switch (dev_priv->chipset & 0xf0) {
46 case 0x00:
47 engine->instmem.init = nv04_instmem_init;
48 engine->instmem.takedown = nv04_instmem_takedown;
49 engine->instmem.suspend = nv04_instmem_suspend;
50 engine->instmem.resume = nv04_instmem_resume;
51 engine->instmem.populate = nv04_instmem_populate;
52 engine->instmem.clear = nv04_instmem_clear;
53 engine->instmem.bind = nv04_instmem_bind;
54 engine->instmem.unbind = nv04_instmem_unbind;
55 engine->instmem.prepare_access = nv04_instmem_prepare_access;
56 engine->instmem.finish_access = nv04_instmem_finish_access;
57 engine->mc.init = nv04_mc_init;
58 engine->mc.takedown = nv04_mc_takedown;
59 engine->timer.init = nv04_timer_init;
60 engine->timer.read = nv04_timer_read;
61 engine->timer.takedown = nv04_timer_takedown;
62 engine->fb.init = nv04_fb_init;
63 engine->fb.takedown = nv04_fb_takedown;
64 engine->graph.grclass = nv04_graph_grclass;
65 engine->graph.init = nv04_graph_init;
66 engine->graph.takedown = nv04_graph_takedown;
67 engine->graph.fifo_access = nv04_graph_fifo_access;
68 engine->graph.channel = nv04_graph_channel;
69 engine->graph.create_context = nv04_graph_create_context;
70 engine->graph.destroy_context = nv04_graph_destroy_context;
71 engine->graph.load_context = nv04_graph_load_context;
72 engine->graph.unload_context = nv04_graph_unload_context;
73 engine->fifo.channels = 16;
74 engine->fifo.init = nv04_fifo_init;
75 engine->fifo.takedown = nouveau_stub_takedown;
76 engine->fifo.disable = nv04_fifo_disable;
77 engine->fifo.enable = nv04_fifo_enable;
78 engine->fifo.reassign = nv04_fifo_reassign;
79 engine->fifo.channel_id = nv04_fifo_channel_id;
80 engine->fifo.create_context = nv04_fifo_create_context;
81 engine->fifo.destroy_context = nv04_fifo_destroy_context;
82 engine->fifo.load_context = nv04_fifo_load_context;
83 engine->fifo.unload_context = nv04_fifo_unload_context;
84 break;
85 case 0x10:
86 engine->instmem.init = nv04_instmem_init;
87 engine->instmem.takedown = nv04_instmem_takedown;
88 engine->instmem.suspend = nv04_instmem_suspend;
89 engine->instmem.resume = nv04_instmem_resume;
90 engine->instmem.populate = nv04_instmem_populate;
91 engine->instmem.clear = nv04_instmem_clear;
92 engine->instmem.bind = nv04_instmem_bind;
93 engine->instmem.unbind = nv04_instmem_unbind;
94 engine->instmem.prepare_access = nv04_instmem_prepare_access;
95 engine->instmem.finish_access = nv04_instmem_finish_access;
96 engine->mc.init = nv04_mc_init;
97 engine->mc.takedown = nv04_mc_takedown;
98 engine->timer.init = nv04_timer_init;
99 engine->timer.read = nv04_timer_read;
100 engine->timer.takedown = nv04_timer_takedown;
101 engine->fb.init = nv10_fb_init;
102 engine->fb.takedown = nv10_fb_takedown;
103 engine->graph.grclass = nv10_graph_grclass;
104 engine->graph.init = nv10_graph_init;
105 engine->graph.takedown = nv10_graph_takedown;
106 engine->graph.channel = nv10_graph_channel;
107 engine->graph.create_context = nv10_graph_create_context;
108 engine->graph.destroy_context = nv10_graph_destroy_context;
109 engine->graph.fifo_access = nv04_graph_fifo_access;
110 engine->graph.load_context = nv10_graph_load_context;
111 engine->graph.unload_context = nv10_graph_unload_context;
112 engine->fifo.channels = 32;
113 engine->fifo.init = nv10_fifo_init;
114 engine->fifo.takedown = nouveau_stub_takedown;
115 engine->fifo.disable = nv04_fifo_disable;
116 engine->fifo.enable = nv04_fifo_enable;
117 engine->fifo.reassign = nv04_fifo_reassign;
118 engine->fifo.channel_id = nv10_fifo_channel_id;
119 engine->fifo.create_context = nv10_fifo_create_context;
120 engine->fifo.destroy_context = nv10_fifo_destroy_context;
121 engine->fifo.load_context = nv10_fifo_load_context;
122 engine->fifo.unload_context = nv10_fifo_unload_context;
123 break;
124 case 0x20:
125 engine->instmem.init = nv04_instmem_init;
126 engine->instmem.takedown = nv04_instmem_takedown;
127 engine->instmem.suspend = nv04_instmem_suspend;
128 engine->instmem.resume = nv04_instmem_resume;
129 engine->instmem.populate = nv04_instmem_populate;
130 engine->instmem.clear = nv04_instmem_clear;
131 engine->instmem.bind = nv04_instmem_bind;
132 engine->instmem.unbind = nv04_instmem_unbind;
133 engine->instmem.prepare_access = nv04_instmem_prepare_access;
134 engine->instmem.finish_access = nv04_instmem_finish_access;
135 engine->mc.init = nv04_mc_init;
136 engine->mc.takedown = nv04_mc_takedown;
137 engine->timer.init = nv04_timer_init;
138 engine->timer.read = nv04_timer_read;
139 engine->timer.takedown = nv04_timer_takedown;
140 engine->fb.init = nv10_fb_init;
141 engine->fb.takedown = nv10_fb_takedown;
142 engine->graph.grclass = nv20_graph_grclass;
143 engine->graph.init = nv20_graph_init;
144 engine->graph.takedown = nv20_graph_takedown;
145 engine->graph.channel = nv10_graph_channel;
146 engine->graph.create_context = nv20_graph_create_context;
147 engine->graph.destroy_context = nv20_graph_destroy_context;
148 engine->graph.fifo_access = nv04_graph_fifo_access;
149 engine->graph.load_context = nv20_graph_load_context;
150 engine->graph.unload_context = nv20_graph_unload_context;
151 engine->fifo.channels = 32;
152 engine->fifo.init = nv10_fifo_init;
153 engine->fifo.takedown = nouveau_stub_takedown;
154 engine->fifo.disable = nv04_fifo_disable;
155 engine->fifo.enable = nv04_fifo_enable;
156 engine->fifo.reassign = nv04_fifo_reassign;
157 engine->fifo.channel_id = nv10_fifo_channel_id;
158 engine->fifo.create_context = nv10_fifo_create_context;
159 engine->fifo.destroy_context = nv10_fifo_destroy_context;
160 engine->fifo.load_context = nv10_fifo_load_context;
161 engine->fifo.unload_context = nv10_fifo_unload_context;
162 break;
163 case 0x30:
164 engine->instmem.init = nv04_instmem_init;
165 engine->instmem.takedown = nv04_instmem_takedown;
166 engine->instmem.suspend = nv04_instmem_suspend;
167 engine->instmem.resume = nv04_instmem_resume;
168 engine->instmem.populate = nv04_instmem_populate;
169 engine->instmem.clear = nv04_instmem_clear;
170 engine->instmem.bind = nv04_instmem_bind;
171 engine->instmem.unbind = nv04_instmem_unbind;
172 engine->instmem.prepare_access = nv04_instmem_prepare_access;
173 engine->instmem.finish_access = nv04_instmem_finish_access;
174 engine->mc.init = nv04_mc_init;
175 engine->mc.takedown = nv04_mc_takedown;
176 engine->timer.init = nv04_timer_init;
177 engine->timer.read = nv04_timer_read;
178 engine->timer.takedown = nv04_timer_takedown;
179 engine->fb.init = nv10_fb_init;
180 engine->fb.takedown = nv10_fb_takedown;
181 engine->graph.grclass = nv30_graph_grclass;
182 engine->graph.init = nv30_graph_init;
183 engine->graph.takedown = nv20_graph_takedown;
184 engine->graph.fifo_access = nv04_graph_fifo_access;
185 engine->graph.channel = nv10_graph_channel;
186 engine->graph.create_context = nv20_graph_create_context;
187 engine->graph.destroy_context = nv20_graph_destroy_context;
188 engine->graph.load_context = nv20_graph_load_context;
189 engine->graph.unload_context = nv20_graph_unload_context;
190 engine->fifo.channels = 32;
191 engine->fifo.init = nv10_fifo_init;
192 engine->fifo.takedown = nouveau_stub_takedown;
193 engine->fifo.disable = nv04_fifo_disable;
194 engine->fifo.enable = nv04_fifo_enable;
195 engine->fifo.reassign = nv04_fifo_reassign;
196 engine->fifo.channel_id = nv10_fifo_channel_id;
197 engine->fifo.create_context = nv10_fifo_create_context;
198 engine->fifo.destroy_context = nv10_fifo_destroy_context;
199 engine->fifo.load_context = nv10_fifo_load_context;
200 engine->fifo.unload_context = nv10_fifo_unload_context;
201 break;
202 case 0x40:
203 case 0x60:
204 engine->instmem.init = nv04_instmem_init;
205 engine->instmem.takedown = nv04_instmem_takedown;
206 engine->instmem.suspend = nv04_instmem_suspend;
207 engine->instmem.resume = nv04_instmem_resume;
208 engine->instmem.populate = nv04_instmem_populate;
209 engine->instmem.clear = nv04_instmem_clear;
210 engine->instmem.bind = nv04_instmem_bind;
211 engine->instmem.unbind = nv04_instmem_unbind;
212 engine->instmem.prepare_access = nv04_instmem_prepare_access;
213 engine->instmem.finish_access = nv04_instmem_finish_access;
214 engine->mc.init = nv40_mc_init;
215 engine->mc.takedown = nv40_mc_takedown;
216 engine->timer.init = nv04_timer_init;
217 engine->timer.read = nv04_timer_read;
218 engine->timer.takedown = nv04_timer_takedown;
219 engine->fb.init = nv40_fb_init;
220 engine->fb.takedown = nv40_fb_takedown;
221 engine->graph.grclass = nv40_graph_grclass;
222 engine->graph.init = nv40_graph_init;
223 engine->graph.takedown = nv40_graph_takedown;
224 engine->graph.fifo_access = nv04_graph_fifo_access;
225 engine->graph.channel = nv40_graph_channel;
226 engine->graph.create_context = nv40_graph_create_context;
227 engine->graph.destroy_context = nv40_graph_destroy_context;
228 engine->graph.load_context = nv40_graph_load_context;
229 engine->graph.unload_context = nv40_graph_unload_context;
230 engine->fifo.channels = 32;
231 engine->fifo.init = nv40_fifo_init;
232 engine->fifo.takedown = nouveau_stub_takedown;
233 engine->fifo.disable = nv04_fifo_disable;
234 engine->fifo.enable = nv04_fifo_enable;
235 engine->fifo.reassign = nv04_fifo_reassign;
236 engine->fifo.channel_id = nv10_fifo_channel_id;
237 engine->fifo.create_context = nv40_fifo_create_context;
238 engine->fifo.destroy_context = nv40_fifo_destroy_context;
239 engine->fifo.load_context = nv40_fifo_load_context;
240 engine->fifo.unload_context = nv40_fifo_unload_context;
241 break;
242 case 0x50:
243 case 0x80: /* gotta love NVIDIA's consistency.. */
244 case 0x90:
245 case 0xA0:
246 engine->instmem.init = nv50_instmem_init;
247 engine->instmem.takedown = nv50_instmem_takedown;
248 engine->instmem.suspend = nv50_instmem_suspend;
249 engine->instmem.resume = nv50_instmem_resume;
250 engine->instmem.populate = nv50_instmem_populate;
251 engine->instmem.clear = nv50_instmem_clear;
252 engine->instmem.bind = nv50_instmem_bind;
253 engine->instmem.unbind = nv50_instmem_unbind;
254 engine->instmem.prepare_access = nv50_instmem_prepare_access;
255 engine->instmem.finish_access = nv50_instmem_finish_access;
256 engine->mc.init = nv50_mc_init;
257 engine->mc.takedown = nv50_mc_takedown;
258 engine->timer.init = nv04_timer_init;
259 engine->timer.read = nv04_timer_read;
260 engine->timer.takedown = nv04_timer_takedown;
261 engine->fb.init = nouveau_stub_init;
262 engine->fb.takedown = nouveau_stub_takedown;
263 engine->graph.grclass = nv50_graph_grclass;
264 engine->graph.init = nv50_graph_init;
265 engine->graph.takedown = nv50_graph_takedown;
266 engine->graph.fifo_access = nv50_graph_fifo_access;
267 engine->graph.channel = nv50_graph_channel;
268 engine->graph.create_context = nv50_graph_create_context;
269 engine->graph.destroy_context = nv50_graph_destroy_context;
270 engine->graph.load_context = nv50_graph_load_context;
271 engine->graph.unload_context = nv50_graph_unload_context;
272 engine->fifo.channels = 128;
273 engine->fifo.init = nv50_fifo_init;
274 engine->fifo.takedown = nv50_fifo_takedown;
275 engine->fifo.disable = nv04_fifo_disable;
276 engine->fifo.enable = nv04_fifo_enable;
277 engine->fifo.reassign = nv04_fifo_reassign;
278 engine->fifo.channel_id = nv50_fifo_channel_id;
279 engine->fifo.create_context = nv50_fifo_create_context;
280 engine->fifo.destroy_context = nv50_fifo_destroy_context;
281 engine->fifo.load_context = nv50_fifo_load_context;
282 engine->fifo.unload_context = nv50_fifo_unload_context;
283 break;
284 default:
285 NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
286 return 1;
287 }
288
289 return 0;
290}
291
292static unsigned int
293nouveau_vga_set_decode(void *priv, bool state)
294{
295 if (state)
296 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
297 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
298 else
299 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
300}
301
302int
303nouveau_card_init(struct drm_device *dev)
304{
305 struct drm_nouveau_private *dev_priv = dev->dev_private;
306 struct nouveau_engine *engine;
307 struct nouveau_gpuobj *gpuobj;
308 int ret;
309
310 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
311
312 if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
313 return 0;
314
315 vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
316
317 /* Initialise internal driver API hooks */
318 ret = nouveau_init_engine_ptrs(dev);
319 if (ret)
320 return ret;
321 engine = &dev_priv->engine;
322 dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
323
324 /* Parse BIOS tables / Run init tables if card not POSTed */
325 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
326 ret = nouveau_bios_init(dev);
327 if (ret)
328 return ret;
329 }
330
331 ret = nouveau_gpuobj_early_init(dev);
332 if (ret)
333 return ret;
334
335 /* Initialise instance memory; this must happen before mem_init so we
336 * know exactly how much VRAM we're able to use for "normal"
337 * purposes.
338 */
339 ret = engine->instmem.init(dev);
340 if (ret)
341 return ret;
342
343 /* Setup the memory manager */
344 ret = nouveau_mem_init(dev);
345 if (ret)
346 return ret;
347
348 ret = nouveau_gpuobj_init(dev);
349 if (ret)
350 return ret;
351
352 /* PMC */
353 ret = engine->mc.init(dev);
354 if (ret)
355 return ret;
356
357 /* PTIMER */
358 ret = engine->timer.init(dev);
359 if (ret)
360 return ret;
361
362 /* PFB */
363 ret = engine->fb.init(dev);
364 if (ret)
365 return ret;
366
367 /* PGRAPH */
368 ret = engine->graph.init(dev);
369 if (ret)
370 return ret;
371
372 /* PFIFO */
373 ret = engine->fifo.init(dev);
374 if (ret)
375 return ret;
376
377 /* this calls irq_preinstall, registers the irq handler and
378 * calls irq_postinstall
379 */
380 ret = drm_irq_install(dev);
381 if (ret)
382 return ret;
383
384 ret = drm_vblank_init(dev, 0);
385 if (ret)
386 return ret;
387
388 /* what about PVIDEO/PCRTC/PRAMDAC etc? */
389
390 ret = nouveau_channel_alloc(dev, &dev_priv->channel,
391 (struct drm_file *)-2,
392 NvDmaFB, NvDmaTT);
393 if (ret)
394 return ret;
395
396 gpuobj = NULL;
397 ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
398 0, nouveau_mem_fb_amount(dev),
399 NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
400 &gpuobj);
401 if (ret)
402 return ret;
403
404 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
405 gpuobj, NULL);
406 if (ret) {
407 nouveau_gpuobj_del(dev, &gpuobj);
408 return ret;
409 }
410
411 gpuobj = NULL;
412 ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
413 dev_priv->gart_info.aper_size,
414 NV_DMA_ACCESS_RW, &gpuobj, NULL);
415 if (ret)
416 return ret;
417
418 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
419 gpuobj, NULL);
420 if (ret) {
421 nouveau_gpuobj_del(dev, &gpuobj);
422 return ret;
423 }
424
425 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
426 if (dev_priv->card_type >= NV_50) {
427 ret = nv50_display_create(dev);
428 if (ret)
429 return ret;
430 } else {
431 ret = nv04_display_create(dev);
432 if (ret)
433 return ret;
434 }
435 }
436
437 ret = nouveau_backlight_init(dev);
438 if (ret)
439 NV_ERROR(dev, "Error %d registering backlight\n", ret);
440
441 dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
442
443 if (drm_core_check_feature(dev, DRIVER_MODESET))
444 drm_helper_initial_config(dev);
445
446 return 0;
447}
448
449static void nouveau_card_takedown(struct drm_device *dev)
450{
451 struct drm_nouveau_private *dev_priv = dev->dev_private;
452 struct nouveau_engine *engine = &dev_priv->engine;
453
454 NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
455
456 if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
457 nouveau_backlight_exit(dev);
458
459 if (dev_priv->channel) {
460 nouveau_channel_free(dev_priv->channel);
461 dev_priv->channel = NULL;
462 }
463
464 engine->fifo.takedown(dev);
465 engine->graph.takedown(dev);
466 engine->fb.takedown(dev);
467 engine->timer.takedown(dev);
468 engine->mc.takedown(dev);
469
470 mutex_lock(&dev->struct_mutex);
471 ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
472 mutex_unlock(&dev->struct_mutex);
473 nouveau_sgdma_takedown(dev);
474
475 nouveau_gpuobj_takedown(dev);
476 nouveau_mem_close(dev);
477 engine->instmem.takedown(dev);
478
479 if (drm_core_check_feature(dev, DRIVER_MODESET))
480 drm_irq_uninstall(dev);
481
482 nouveau_gpuobj_late_takedown(dev);
483 nouveau_bios_takedown(dev);
484
485 vga_client_register(dev->pdev, NULL, NULL, NULL);
486
487 dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
488 }
489}
490
491/* when a client exits, release the resources that were allocated for its
492 * file_priv */
493void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
494{
495 nouveau_channel_cleanup(dev, file_priv);
496}
497
498/* first module load: set up the mmio/fb mapping */
499/* KMS: we need mmio at load time, not when the first drm client opens. */
500int nouveau_firstopen(struct drm_device *dev)
501{
502 return 0;
503}
504
505/* if we have an OF card, copy vbios to RAMIN */
506static void nouveau_OF_copy_vbios_to_ramin(struct drm_device *dev)
507{
508#if defined(__powerpc__)
509 int size, i;
510 const uint32_t *bios;
511 struct device_node *dn = pci_device_to_OF_node(dev->pdev);
512 if (!dn) {
513 NV_INFO(dev, "Unable to get the OF node\n");
514 return;
515 }
516
517 bios = of_get_property(dn, "NVDA,BMP", &size);
518 if (bios) {
519 for (i = 0; i < size; i += 4)
520 nv_wi32(dev, i, bios[i/4]);
521 NV_INFO(dev, "OF bios successfully copied (%d bytes)\n", size);
522 } else {
523 NV_INFO(dev, "Unable to get the OF bios\n");
524 }
525#endif
526}
527
528int nouveau_load(struct drm_device *dev, unsigned long flags)
529{
530 struct drm_nouveau_private *dev_priv;
531 uint32_t reg0;
532 resource_size_t mmio_start_offs;
533
534 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
535 if (!dev_priv)
536 return -ENOMEM;
537 dev->dev_private = dev_priv;
538 dev_priv->dev = dev;
539
540 dev_priv->flags = flags & NOUVEAU_FLAGS;
541 dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
542
543 NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
544 dev->pci_vendor, dev->pci_device, dev->pdev->class);
545
546 dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
547
548 if (dev_priv->acpi_dsm)
549 nouveau_hybrid_setup(dev);
550
551 dev_priv->wq = create_workqueue("nouveau");
552 if (!dev_priv->wq)
553 return -EINVAL;
554
555 /* resource 0 is mmio regs */
556 /* resource 1 is linear FB */
557 /* resource 2 is RAMIN (mmio regs + 0x1000000) */
558 /* resource 6 is bios */
559
560 /* map the mmio regs */
561 mmio_start_offs = pci_resource_start(dev->pdev, 0);
562 dev_priv->mmio = ioremap(mmio_start_offs, 0x00800000);
563 if (!dev_priv->mmio) {
564 NV_ERROR(dev, "Unable to initialize the mmio mapping. "
565 "Please report your setup to " DRIVER_EMAIL "\n");
566 return -EINVAL;
567 }
568 NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
569 (unsigned long long)mmio_start_offs);
570
571#ifdef __BIG_ENDIAN
572 /* Put the card in BE mode if it isn't already */
573 if (nv_rd32(dev, NV03_PMC_BOOT_1))
574 nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001);
575
576 DRM_MEMORYBARRIER();
577#endif
578
579 /* Time to determine the card architecture */
580 reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
581
582 /* We're dealing with >=NV10 */
583 if ((reg0 & 0x0f000000) > 0) {
584 /* Bits 27:20 contain the chipset number */
585 dev_priv->chipset = (reg0 & 0xff00000) >> 20;
586 /* NV04 or NV05 */
587 } else if ((reg0 & 0xff00fff0) == 0x20004000) {
588 dev_priv->chipset = 0x04;
589 } else
590 dev_priv->chipset = 0xff;
591
592 switch (dev_priv->chipset & 0xf0) {
593 case 0x00:
594 case 0x10:
595 case 0x20:
596 case 0x30:
597 dev_priv->card_type = dev_priv->chipset & 0xf0;
598 break;
599 case 0x40:
600 case 0x60:
601 dev_priv->card_type = NV_40;
602 break;
603 case 0x50:
604 case 0x80:
605 case 0x90:
606 case 0xa0:
607 dev_priv->card_type = NV_50;
608 break;
609 default:
610 NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
611 return -EINVAL;
612 }
613
614 NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
615 dev_priv->card_type, reg0);
616
617 /* map the larger RAMIN aperture on NV40 and newer cards */
618 dev_priv->ramin = NULL;
619 if (dev_priv->card_type >= NV_40) {
620 int ramin_bar = 2;
621 if (pci_resource_len(dev->pdev, ramin_bar) == 0)
622 ramin_bar = 3;
623
624 dev_priv->ramin_size = pci_resource_len(dev->pdev, ramin_bar);
625 dev_priv->ramin = ioremap(
626 pci_resource_start(dev->pdev, ramin_bar),
627 dev_priv->ramin_size);
628 if (!dev_priv->ramin) {
629 NV_ERROR(dev, "Failed to init RAMIN mapping, "
630 "limited instance memory available\n");
631 }
632 }
633
634 /* On older cards (or if the above failed), create a map covering
635 * the BAR0 PRAMIN aperture */
636 if (!dev_priv->ramin) {
637 dev_priv->ramin_size = 1 * 1024 * 1024;
638 dev_priv->ramin = ioremap(mmio_start_offs + NV_RAMIN,
639 dev_priv->ramin_size);
640 if (!dev_priv->ramin) {
641 NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
642 return -ENOMEM;
643 }
644 }
645
646 nouveau_OF_copy_vbios_to_ramin(dev);
647
648 /* Special flags */
649 if (dev->pci_device == 0x01a0)
650 dev_priv->flags |= NV_NFORCE;
651 else if (dev->pci_device == 0x01f0)
652 dev_priv->flags |= NV_NFORCE2;
653
654 /* For kernel modesetting, init card now and bring up fbcon */
655 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
656 int ret = nouveau_card_init(dev);
657 if (ret)
658 return ret;
659 }
660
661 return 0;
662}
663
664static void nouveau_close(struct drm_device *dev)
665{
666 struct drm_nouveau_private *dev_priv = dev->dev_private;
667
668 /* In the case of an error, dev_priv may not be allocated yet */
669 if (dev_priv && dev_priv->card_type)
670 nouveau_card_takedown(dev);
671}
672
673/* KMS: we need mmio at load time, not when the first drm client opens. */
674void nouveau_lastclose(struct drm_device *dev)
675{
676 if (drm_core_check_feature(dev, DRIVER_MODESET))
677 return;
678
679 nouveau_close(dev);
680}
681
682int nouveau_unload(struct drm_device *dev)
683{
684 struct drm_nouveau_private *dev_priv = dev->dev_private;
685
686 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
687 if (dev_priv->card_type >= NV_50)
688 nv50_display_destroy(dev);
689 else
690 nv04_display_destroy(dev);
691 nouveau_close(dev);
692 }
693
694 iounmap(dev_priv->mmio);
695 iounmap(dev_priv->ramin);
696
697 kfree(dev_priv);
698 dev->dev_private = NULL;
699 return 0;
700}
701
702int
703nouveau_ioctl_card_init(struct drm_device *dev, void *data,
704 struct drm_file *file_priv)
705{
706 return nouveau_card_init(dev);
707}
708
709int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
710 struct drm_file *file_priv)
711{
712 struct drm_nouveau_private *dev_priv = dev->dev_private;
713 struct drm_nouveau_getparam *getparam = data;
714
715 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
716
717 switch (getparam->param) {
718 case NOUVEAU_GETPARAM_CHIPSET_ID:
719 getparam->value = dev_priv->chipset;
720 break;
721 case NOUVEAU_GETPARAM_PCI_VENDOR:
722 getparam->value = dev->pci_vendor;
723 break;
724 case NOUVEAU_GETPARAM_PCI_DEVICE:
725 getparam->value = dev->pci_device;
726 break;
727 case NOUVEAU_GETPARAM_BUS_TYPE:
728 if (drm_device_is_agp(dev))
729 getparam->value = NV_AGP;
730 else if (drm_device_is_pcie(dev))
731 getparam->value = NV_PCIE;
732 else
733 getparam->value = NV_PCI;
734 break;
735 case NOUVEAU_GETPARAM_FB_PHYSICAL:
736 getparam->value = dev_priv->fb_phys;
737 break;
738 case NOUVEAU_GETPARAM_AGP_PHYSICAL:
739 getparam->value = dev_priv->gart_info.aper_base;
740 break;
741 case NOUVEAU_GETPARAM_PCI_PHYSICAL:
742 if (dev->sg) {
743 getparam->value = (unsigned long)dev->sg->virtual;
744 } else {
745 NV_ERROR(dev, "Requested PCIGART address, "
746 "while no PCIGART was created\n");
747 return -EINVAL;
748 }
749 break;
750 case NOUVEAU_GETPARAM_FB_SIZE:
751 getparam->value = dev_priv->fb_available_size;
752 break;
753 case NOUVEAU_GETPARAM_AGP_SIZE:
754 getparam->value = dev_priv->gart_info.aper_size;
755 break;
756 case NOUVEAU_GETPARAM_VM_VRAM_BASE:
757 getparam->value = dev_priv->vm_vram_base;
758 break;
759 default:
760 NV_ERROR(dev, "unknown parameter %lld\n", getparam->param);
761 return -EINVAL;
762 }
763
764 return 0;
765}
766
767int
768nouveau_ioctl_setparam(struct drm_device *dev, void *data,
769 struct drm_file *file_priv)
770{
771 struct drm_nouveau_setparam *setparam = data;
772
773 NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
774
775 switch (setparam->param) {
776 default:
777 NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
778 return -EINVAL;
779 }
780
781 return 0;
782}
783
784/* Wait until (value(reg) & mask) == val, up until timeout has hit */
785bool nouveau_wait_until(struct drm_device *dev, uint64_t timeout,
786 uint32_t reg, uint32_t mask, uint32_t val)
787{
788 struct drm_nouveau_private *dev_priv = dev->dev_private;
789 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
790 uint64_t start = ptimer->read(dev);
791
792 do {
793 if ((nv_rd32(dev, reg) & mask) == val)
794 return true;
795 } while (ptimer->read(dev) - start < timeout);
796
797 return false;
798}
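/* The loop above polls against PTIMER, so both 'timeout' and the
 * ptimer->read() values are in nanoseconds.  A hedged usage sketch
 * (the 2s value mirrors what the nv_wait() convenience macro is
 * assumed to pass; compare nouveau_wait_for_idle() below):
 *
 *	if (!nouveau_wait_until(dev, 2000000000ULL, NV04_PGRAPH_STATUS,
 *				0xffffffff, 0x00000000))
 *		NV_ERROR(dev, "PGRAPH never went idle\n");
 */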
799
800/* Waits for PGRAPH to go completely idle */
801bool nouveau_wait_for_idle(struct drm_device *dev)
802{
803 if (!nv_wait(NV04_PGRAPH_STATUS, 0xffffffff, 0x00000000)) {
804 NV_ERROR(dev, "PGRAPH idle timed out with status 0x%08x\n",
805 nv_rd32(dev, NV04_PGRAPH_STATUS));
806 return false;
807 }
808
809 return true;
810}
811
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
new file mode 100644
index 000000000000..187eb84e4da5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -0,0 +1,131 @@
1/*
2 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
3 * All Rights Reserved.
4 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sub license,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28
29#include "nouveau_drv.h"
30
31static struct vm_operations_struct nouveau_ttm_vm_ops;
32static const struct vm_operations_struct *ttm_vm_ops;
33
34static int
35nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36{
37 struct ttm_buffer_object *bo = vma->vm_private_data;
38 int ret;
39
40 if (unlikely(bo == NULL))
41 return VM_FAULT_NOPAGE;
42
43 ret = ttm_vm_ops->fault(vma, vmf);
44 return ret;
45}
46
47int
48nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
49{
50 struct drm_file *file_priv = filp->private_data;
51 struct drm_nouveau_private *dev_priv =
52 file_priv->minor->dev->dev_private;
53 int ret;
54
55 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
56 return drm_mmap(filp, vma);
57
58 ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
59 if (unlikely(ret != 0))
60 return ret;
61
62 if (unlikely(ttm_vm_ops == NULL)) {
63 ttm_vm_ops = vma->vm_ops;
64 nouveau_ttm_vm_ops = *ttm_vm_ops;
65 nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
66 }
67
68 vma->vm_ops = &nouveau_ttm_vm_ops;
69 return 0;
70}
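/* The dance above is plain vm_operations interposition: the first
 * TTM-backed mmap caches TTM's vm_ops table, clones it once into a
 * writable copy with .fault swapped for our wrapper, and hands every
 * later mapping the clone.  nouveau_ttm_fault() is currently little
 * more than a pass-through, but this gives the driver a place to take
 * locks or touch per-bo state around TTM's fault handling without
 * reimplementing it.
 */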
71
72static int
73nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
74{
75 return ttm_mem_global_init(ref->object);
76}
77
78static void
79nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
80{
81 ttm_mem_global_release(ref->object);
82}
83
84int
85nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
86{
87 struct ttm_global_reference *global_ref;
88 int ret;
89
90 global_ref = &dev_priv->ttm.mem_global_ref;
91 global_ref->global_type = TTM_GLOBAL_TTM_MEM;
92 global_ref->size = sizeof(struct ttm_mem_global);
93 global_ref->init = &nouveau_ttm_mem_global_init;
94 global_ref->release = &nouveau_ttm_mem_global_release;
95
96 ret = ttm_global_item_ref(global_ref);
97 if (unlikely(ret != 0)) {
98 DRM_ERROR("Failed setting up TTM memory accounting\n");
99 dev_priv->ttm.mem_global_ref.release = NULL;
100 return ret;
101 }
102
103 dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
104 global_ref = &dev_priv->ttm.bo_global_ref.ref;
105 global_ref->global_type = TTM_GLOBAL_TTM_BO;
106 global_ref->size = sizeof(struct ttm_bo_global);
107 global_ref->init = &ttm_bo_global_init;
108 global_ref->release = &ttm_bo_global_release;
109
110 ret = ttm_global_item_ref(global_ref);
111 if (unlikely(ret != 0)) {
112 DRM_ERROR("Failed setting up TTM BO subsystem\n");
113 ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
114 dev_priv->ttm.mem_global_ref.release = NULL;
115 return ret;
116 }
117
118 return 0;
119}
120
121void
122nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
123{
124 if (dev_priv->ttm.mem_global_ref.release == NULL)
125 return;
126
127 ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
128 ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
129 dev_priv->ttm.mem_global_ref.release = NULL;
130}
131
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
new file mode 100644
index 000000000000..b91363606055
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -0,0 +1,1002 @@
1/*
2 * Copyright 1993-2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
23 * DEALINGS IN THE SOFTWARE.
24 */
25
26#include "drmP.h"
27#include "drm_crtc_helper.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_connector.h"
32#include "nouveau_crtc.h"
33#include "nouveau_fb.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37static int
38nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
39 struct drm_framebuffer *old_fb);
40
41static void
42crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
43{
44 NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
45 crtcstate->CRTC[index]);
46}
47
48static void nv_crtc_set_digital_vibrance(struct drm_crtc *crtc, int level)
49{
50 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
51 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
52 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
53
54 regp->CRTC[NV_CIO_CRE_CSB] = nv_crtc->saturation = level;
55 if (nv_crtc->saturation && nv_gf4_disp_arch(crtc->dev)) {
56 regp->CRTC[NV_CIO_CRE_CSB] = 0x80;
57 regp->CRTC[NV_CIO_CRE_5B] = nv_crtc->saturation << 2;
58 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_5B);
59 }
60 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_CSB);
61}
62
63static void nv_crtc_set_image_sharpening(struct drm_crtc *crtc, int level)
64{
65 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
66 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
67 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
68
69 nv_crtc->sharpness = level;
70 if (level < 0) /* blur is in hw range 0x3f -> 0x20 */
71 level += 0x40;
72 regp->ramdac_634 = level;
73 NVWriteRAMDAC(crtc->dev, nv_crtc->index, NV_PRAMDAC_634, regp->ramdac_634);
74}
75
76#define PLLSEL_VPLL1_MASK \
77 (NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL \
78 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2)
79#define PLLSEL_VPLL2_MASK \
80 (NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 \
81 | NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2)
82#define PLLSEL_TV_MASK \
83 (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
84 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 \
85 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
86 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
87
88/* NV4x 0x40.. pll notes:
89 * gpu pll: 0x4000 + 0x4004
90 * ?gpu? pll: 0x4008 + 0x400c
91 * vpll1: 0x4010 + 0x4014
92 * vpll2: 0x4018 + 0x401c
93 * mpll: 0x4020 + 0x4024
94 * mpll: 0x4038 + 0x403c
95 *
96 * the first register of each pair has some unknown details:
97 * bits 0-7: redirected values from elsewhere? (similar to PLL_SETUP_CONTROL?)
98 * bits 20-23: (mpll) something to do with post divider?
99 * bits 28-31: related to single stage mode? (bit 8/12)
100 */
101
102static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mode * mode, int dot_clock)
103{
104 struct drm_device *dev = crtc->dev;
105 struct drm_nouveau_private *dev_priv = dev->dev_private;
106 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
107 struct nv04_mode_state *state = &dev_priv->mode_reg;
108 struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
109 struct nouveau_pll_vals *pv = &regp->pllvals;
110 struct pll_lims pll_lim;
111
112 if (get_pll_limits(dev, nv_crtc->index ? VPLL2 : VPLL1, &pll_lim))
113 return;
114
115 /* NM2 == 0 is used to determine single stage mode on two stage plls */
116 pv->NM2 = 0;
117
118 /* for newer nv4x the blob uses only the first stage of the vpll below a
119 * certain clock. for a certain nv4b this is 150MHz. since the max
120 * output frequency of the first stage for this card is 300MHz, it is
121 * assumed the threshold is given by vco1 maxfreq/2
122 */
123 /* for early nv4x, specifically nv40 and *some* nv43 (devids 0 and 6,
124 * not 8, others unknown), the blob always uses both plls. no problem
125	 * has yet been observed in allowing the use of a single stage pll on all
126 * nv43 however. the behaviour of single stage use is untested on nv40
127 */
128 if (dev_priv->chipset > 0x40 && dot_clock <= (pll_lim.vco1.maxfreq / 2))
129 memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
130
131 if (!nouveau_calc_pll_mnp(dev, &pll_lim, dot_clock, pv))
132 return;
133
134 state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
135
136 /* The blob uses this always, so let's do the same */
137 if (dev_priv->card_type == NV_40)
138 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
139 /* again nv40 and some nv43 act more like nv3x as described above */
140 if (dev_priv->chipset < 0x41)
141 state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
142 NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
143 state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
144
145 if (pv->NM2)
146 NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
147 pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
148 else
149 NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
150 pv->N1, pv->M1, pv->log2P);
151
152 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
153}
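/* For reference, the VPLL output for the MNP values chosen above is
 * assumed to follow the usual two-stage form
 *
 *	clk = refclk * (N1 / M1) * (N2 / M2) / 2^log2P
 *
 * collapsing to clk = refclk * N1 / (M1 * 2^log2P) in single-stage
 * mode (NM2 == 0).  E.g. a 27000 kHz crystal with N1 = 100, M1 = 10
 * and log2P = 2 in single-stage mode gives 27000 * 100 / 10 >> 2 =
 * 67500 kHz.
 */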
154
155static void
156nv_crtc_dpms(struct drm_crtc *crtc, int mode)
157{
158 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
159 struct drm_device *dev = crtc->dev;
160 unsigned char seq1 = 0, crtc17 = 0;
161 unsigned char crtc1A;
162
163 NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
164 nv_crtc->index);
165
166	if (nv_crtc->last_dpms == mode) /* Don't do unnecessary mode changes. */
167 return;
168
169 nv_crtc->last_dpms = mode;
170
171 if (nv_two_heads(dev))
172 NVSetOwner(dev, nv_crtc->index);
173
174 /* nv4ref indicates these two RPC1 bits inhibit h/v sync */
175 crtc1A = NVReadVgaCrtc(dev, nv_crtc->index,
176 NV_CIO_CRE_RPC1_INDEX) & ~0xC0;
177 switch (mode) {
178 case DRM_MODE_DPMS_STANDBY:
179 /* Screen: Off; HSync: Off, VSync: On -- Not Supported */
180 seq1 = 0x20;
181 crtc17 = 0x80;
182 crtc1A |= 0x80;
183 break;
184 case DRM_MODE_DPMS_SUSPEND:
185 /* Screen: Off; HSync: On, VSync: Off -- Not Supported */
186 seq1 = 0x20;
187 crtc17 = 0x80;
188 crtc1A |= 0x40;
189 break;
190 case DRM_MODE_DPMS_OFF:
191 /* Screen: Off; HSync: Off, VSync: Off */
192 seq1 = 0x20;
193 crtc17 = 0x00;
194 crtc1A |= 0xC0;
195 break;
196 case DRM_MODE_DPMS_ON:
197 default:
198 /* Screen: On; HSync: On, VSync: On */
199 seq1 = 0x00;
200 crtc17 = 0x80;
201 break;
202 }
203
204 NVVgaSeqReset(dev, nv_crtc->index, true);
205	/* Each head has its own sequencer, so we can turn it off when we want */
206 seq1 |= (NVReadVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX) & ~0x20);
207 NVWriteVgaSeq(dev, nv_crtc->index, NV_VIO_SR_CLOCK_INDEX, seq1);
208 crtc17 |= (NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX) & ~0x80);
209 mdelay(10);
210 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CR_MODE_INDEX, crtc17);
211 NVVgaSeqReset(dev, nv_crtc->index, false);
212
213 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
214}
215
216static bool
217nv_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
218 struct drm_display_mode *adjusted_mode)
219{
220 return true;
221}
222
223static void
224nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
225{
226 struct drm_device *dev = crtc->dev;
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
229 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
230 struct drm_framebuffer *fb = crtc->fb;
231
232 /* Calculate our timings */
233 int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
234 int horizStart = (mode->crtc_hsync_start >> 3) - 1;
235 int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
236 int horizTotal = (mode->crtc_htotal >> 3) - 5;
237 int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
238 int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
239 int vertDisplay = mode->crtc_vdisplay - 1;
240 int vertStart = mode->crtc_vsync_start - 1;
241 int vertEnd = mode->crtc_vsync_end - 1;
242 int vertTotal = mode->crtc_vtotal - 2;
243 int vertBlankStart = mode->crtc_vdisplay - 1;
244 int vertBlankEnd = mode->crtc_vtotal - 1;
245
246 struct drm_encoder *encoder;
247 bool fp_output = false;
248
249 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
250 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
251
252 if (encoder->crtc == crtc &&
253 (nv_encoder->dcb->type == OUTPUT_LVDS ||
254 nv_encoder->dcb->type == OUTPUT_TMDS))
255 fp_output = true;
256 }
257
258 if (fp_output) {
259 vertStart = vertTotal - 3;
260 vertEnd = vertTotal - 2;
261 vertBlankStart = vertStart;
262 horizStart = horizTotal - 5;
263 horizEnd = horizTotal - 2;
264 horizBlankEnd = horizTotal + 4;
265#if 0
266 if (dev->overlayAdaptor && dev_priv->card_type >= NV_10)
267 /* This reportedly works around some video overlay bandwidth problems */
268 horizTotal += 2;
269#endif
270 }
271
272 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
273 vertTotal |= 1;
274
275#if 0
276 ErrorF("horizDisplay: 0x%X \n", horizDisplay);
277 ErrorF("horizStart: 0x%X \n", horizStart);
278 ErrorF("horizEnd: 0x%X \n", horizEnd);
279 ErrorF("horizTotal: 0x%X \n", horizTotal);
280 ErrorF("horizBlankStart: 0x%X \n", horizBlankStart);
281 ErrorF("horizBlankEnd: 0x%X \n", horizBlankEnd);
282 ErrorF("vertDisplay: 0x%X \n", vertDisplay);
283 ErrorF("vertStart: 0x%X \n", vertStart);
284 ErrorF("vertEnd: 0x%X \n", vertEnd);
285 ErrorF("vertTotal: 0x%X \n", vertTotal);
286 ErrorF("vertBlankStart: 0x%X \n", vertBlankStart);
287 ErrorF("vertBlankEnd: 0x%X \n", vertBlankEnd);
288#endif
289
290 /*
291 * compute correct Hsync & Vsync polarity
292 */
293 if ((mode->flags & (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))
294 && (mode->flags & (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) {
295
296 regp->MiscOutReg = 0x23;
297 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
298 regp->MiscOutReg |= 0x40;
299 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
300 regp->MiscOutReg |= 0x80;
301 } else {
302 int vdisplay = mode->vdisplay;
303 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
304 vdisplay *= 2;
305 if (mode->vscan > 1)
306 vdisplay *= mode->vscan;
307 if (vdisplay < 400)
308 regp->MiscOutReg = 0xA3; /* +hsync -vsync */
309 else if (vdisplay < 480)
310 regp->MiscOutReg = 0x63; /* -hsync +vsync */
311 else if (vdisplay < 768)
312 regp->MiscOutReg = 0xE3; /* -hsync -vsync */
313 else
314 regp->MiscOutReg = 0x23; /* +hsync +vsync */
315 }
316
317 regp->MiscOutReg |= (mode->clock_index & 0x03) << 2;
318
319 /*
320 * Time Sequencer
321 */
322 regp->Sequencer[NV_VIO_SR_RESET_INDEX] = 0x00;
323 /* 0x20 disables the sequencer */
324 if (mode->flags & DRM_MODE_FLAG_CLKDIV2)
325 regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x29;
326 else
327 regp->Sequencer[NV_VIO_SR_CLOCK_INDEX] = 0x21;
328 regp->Sequencer[NV_VIO_SR_PLANE_MASK_INDEX] = 0x0F;
329 regp->Sequencer[NV_VIO_SR_CHAR_MAP_INDEX] = 0x00;
330 regp->Sequencer[NV_VIO_SR_MEM_MODE_INDEX] = 0x0E;
331
332 /*
333 * CRTC
334 */
335 regp->CRTC[NV_CIO_CR_HDT_INDEX] = horizTotal;
336 regp->CRTC[NV_CIO_CR_HDE_INDEX] = horizDisplay;
337 regp->CRTC[NV_CIO_CR_HBS_INDEX] = horizBlankStart;
338 regp->CRTC[NV_CIO_CR_HBE_INDEX] = (1 << 7) |
339 XLATE(horizBlankEnd, 0, NV_CIO_CR_HBE_4_0);
340 regp->CRTC[NV_CIO_CR_HRS_INDEX] = horizStart;
341 regp->CRTC[NV_CIO_CR_HRE_INDEX] = XLATE(horizBlankEnd, 5, NV_CIO_CR_HRE_HBE_5) |
342 XLATE(horizEnd, 0, NV_CIO_CR_HRE_4_0);
343 regp->CRTC[NV_CIO_CR_VDT_INDEX] = vertTotal;
344 regp->CRTC[NV_CIO_CR_OVL_INDEX] = XLATE(vertStart, 9, NV_CIO_CR_OVL_VRS_9) |
345 XLATE(vertDisplay, 9, NV_CIO_CR_OVL_VDE_9) |
346 XLATE(vertTotal, 9, NV_CIO_CR_OVL_VDT_9) |
347 (1 << 4) |
348 XLATE(vertBlankStart, 8, NV_CIO_CR_OVL_VBS_8) |
349 XLATE(vertStart, 8, NV_CIO_CR_OVL_VRS_8) |
350 XLATE(vertDisplay, 8, NV_CIO_CR_OVL_VDE_8) |
351 XLATE(vertTotal, 8, NV_CIO_CR_OVL_VDT_8);
352 regp->CRTC[NV_CIO_CR_RSAL_INDEX] = 0x00;
353 regp->CRTC[NV_CIO_CR_CELL_HT_INDEX] = ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ? MASK(NV_CIO_CR_CELL_HT_SCANDBL) : 0) |
354 1 << 6 |
355 XLATE(vertBlankStart, 9, NV_CIO_CR_CELL_HT_VBS_9);
356 regp->CRTC[NV_CIO_CR_CURS_ST_INDEX] = 0x00;
357 regp->CRTC[NV_CIO_CR_CURS_END_INDEX] = 0x00;
358 regp->CRTC[NV_CIO_CR_SA_HI_INDEX] = 0x00;
359 regp->CRTC[NV_CIO_CR_SA_LO_INDEX] = 0x00;
360 regp->CRTC[NV_CIO_CR_TCOFF_HI_INDEX] = 0x00;
361 regp->CRTC[NV_CIO_CR_TCOFF_LO_INDEX] = 0x00;
362 regp->CRTC[NV_CIO_CR_VRS_INDEX] = vertStart;
363 regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
364 regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
365 /* framebuffer can be larger than crtc scanout area. */
366 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
367 regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
368 regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
369 regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
370 regp->CRTC[NV_CIO_CR_MODE_INDEX] = 0x43;
371 regp->CRTC[NV_CIO_CR_LCOMP_INDEX] = 0xff;
372
373 /*
374 * Some extended CRTC registers (they are not saved with the rest of the vga regs).
375 */
376
377 /* framebuffer can be larger than crtc scanout area. */
378 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
379 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
380 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
381 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
382 XLATE(vertBlankStart, 10, NV_CIO_CRE_LSR_VBS_10) |
383 XLATE(vertStart, 10, NV_CIO_CRE_LSR_VRS_10) |
384 XLATE(vertDisplay, 10, NV_CIO_CRE_LSR_VDE_10) |
385 XLATE(vertTotal, 10, NV_CIO_CRE_LSR_VDT_10);
386 regp->CRTC[NV_CIO_CRE_HEB__INDEX] = XLATE(horizStart, 8, NV_CIO_CRE_HEB_HRS_8) |
387 XLATE(horizBlankStart, 8, NV_CIO_CRE_HEB_HBS_8) |
388 XLATE(horizDisplay, 8, NV_CIO_CRE_HEB_HDE_8) |
389 XLATE(horizTotal, 8, NV_CIO_CRE_HEB_HDT_8);
390 regp->CRTC[NV_CIO_CRE_EBR_INDEX] = XLATE(vertBlankStart, 11, NV_CIO_CRE_EBR_VBS_11) |
391 XLATE(vertStart, 11, NV_CIO_CRE_EBR_VRS_11) |
392 XLATE(vertDisplay, 11, NV_CIO_CRE_EBR_VDE_11) |
393 XLATE(vertTotal, 11, NV_CIO_CRE_EBR_VDT_11);
394
395 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
396 horizTotal = (horizTotal >> 1) & ~1;
397 regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = horizTotal;
398 regp->CRTC[NV_CIO_CRE_HEB__INDEX] |= XLATE(horizTotal, 8, NV_CIO_CRE_HEB_ILC_8);
399 } else
400 regp->CRTC[NV_CIO_CRE_ILACE__INDEX] = 0xff; /* interlace off */
401
402 /*
403 * Graphics Display Controller
404 */
405 regp->Graphics[NV_VIO_GX_SR_INDEX] = 0x00;
406 regp->Graphics[NV_VIO_GX_SREN_INDEX] = 0x00;
407 regp->Graphics[NV_VIO_GX_CCOMP_INDEX] = 0x00;
408 regp->Graphics[NV_VIO_GX_ROP_INDEX] = 0x00;
409 regp->Graphics[NV_VIO_GX_READ_MAP_INDEX] = 0x00;
410 regp->Graphics[NV_VIO_GX_MODE_INDEX] = 0x40; /* 256 color mode */
411 regp->Graphics[NV_VIO_GX_MISC_INDEX] = 0x05; /* map 64k mem + graphic mode */
412 regp->Graphics[NV_VIO_GX_DONT_CARE_INDEX] = 0x0F;
413 regp->Graphics[NV_VIO_GX_BIT_MASK_INDEX] = 0xFF;
414
415 regp->Attribute[0] = 0x00; /* standard colormap translation */
416 regp->Attribute[1] = 0x01;
417 regp->Attribute[2] = 0x02;
418 regp->Attribute[3] = 0x03;
419 regp->Attribute[4] = 0x04;
420 regp->Attribute[5] = 0x05;
421 regp->Attribute[6] = 0x06;
422 regp->Attribute[7] = 0x07;
423 regp->Attribute[8] = 0x08;
424 regp->Attribute[9] = 0x09;
425 regp->Attribute[10] = 0x0A;
426 regp->Attribute[11] = 0x0B;
427 regp->Attribute[12] = 0x0C;
428 regp->Attribute[13] = 0x0D;
429 regp->Attribute[14] = 0x0E;
430 regp->Attribute[15] = 0x0F;
431 regp->Attribute[NV_CIO_AR_MODE_INDEX] = 0x01; /* Enable graphic mode */
432 /* Non-vga */
433 regp->Attribute[NV_CIO_AR_OSCAN_INDEX] = 0x00;
434 regp->Attribute[NV_CIO_AR_PLANE_INDEX] = 0x0F; /* enable all color planes */
435 regp->Attribute[NV_CIO_AR_HPP_INDEX] = 0x00;
436 regp->Attribute[NV_CIO_AR_CSEL_INDEX] = 0x00;
437}
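/* Worked example of the timing maths above, for a standard 1024x768
 * mode (htotal 1344, vtotal 806): horizDisplay = (1024 >> 3) - 1 =
 * 127, horizTotal = (1344 >> 3) - 5 = 163, vertTotal = 806 - 2 =
 * 804 = 0x324.  The vertical values no longer fit the 8-bit VGA
 * registers, which is why bits 8 and up are scattered into the
 * OVL/LSR/EBR overflow fields with XLATE() above.
 */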
438
439/**
440 * Sets up registers for the given mode/adjusted_mode pair.
441 *
442 * The clocks, CRTCs and outputs attached to this CRTC must be off.
443 *
444 * This shouldn't enable any clocks, CRTCs, or outputs, but they should
445 * be easily turned on/off after this.
446 */
447static void
448nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
449{
450 struct drm_device *dev = crtc->dev;
451 struct drm_nouveau_private *dev_priv = dev->dev_private;
452 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
453 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
454 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
455 struct drm_encoder *encoder;
456 bool lvds_output = false, tmds_output = false, tv_output = false,
457 off_chip_digital = false;
458
459 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
460 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
461 bool digital = false;
462
463 if (encoder->crtc != crtc)
464 continue;
465
466 if (nv_encoder->dcb->type == OUTPUT_LVDS)
467 digital = lvds_output = true;
468 if (nv_encoder->dcb->type == OUTPUT_TV)
469 tv_output = true;
470 if (nv_encoder->dcb->type == OUTPUT_TMDS)
471 digital = tmds_output = true;
472 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP && digital)
473 off_chip_digital = true;
474 }
475
476 /* Registers not directly related to the (s)vga mode */
477
478 /* What is the meaning of this register? */
479 /* A few popular values are 0x18, 0x1c, 0x38, 0x3c */
480 regp->CRTC[NV_CIO_CRE_ENH_INDEX] = savep->CRTC[NV_CIO_CRE_ENH_INDEX] & ~(1<<5);
481
482 regp->crtc_eng_ctrl = 0;
483 /* Except for rare conditions I2C is enabled on the primary crtc */
484 if (nv_crtc->index == 0)
485 regp->crtc_eng_ctrl |= NV_CRTC_FSEL_I2C;
486#if 0
487 /* Set overlay to desired crtc. */
488 if (dev->overlayAdaptor) {
489 NVPortPrivPtr pPriv = GET_OVERLAY_PRIVATE(dev);
490 if (pPriv->overlayCRTC == nv_crtc->index)
491 regp->crtc_eng_ctrl |= NV_CRTC_FSEL_OVERLAY;
492 }
493#endif
494
495 /* ADDRESS_SPACE_PNVM is the same as setting HCUR_ASI */
496 regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
497 NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
498 NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
499 if (dev_priv->chipset >= 0x11)
500 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
501 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
502 regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
503
504 /* Unblock some timings */
505 regp->CRTC[NV_CIO_CRE_53] = 0;
506 regp->CRTC[NV_CIO_CRE_54] = 0;
507
508 /* 0x00 is disabled, 0x11 is lvds, 0x22 crt and 0x88 tmds */
509 if (lvds_output)
510 regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x11;
511 else if (tmds_output)
512 regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x88;
513 else
514 regp->CRTC[NV_CIO_CRE_SCRATCH3__INDEX] = 0x22;
515
516 /* These values seem to vary */
517 /* This register seems to be used by the bios to make certain decisions on some G70 cards? */
518 regp->CRTC[NV_CIO_CRE_SCRATCH4__INDEX] = savep->CRTC[NV_CIO_CRE_SCRATCH4__INDEX];
519
520 nv_crtc_set_digital_vibrance(crtc, nv_crtc->saturation);
521
522 /* probably a scratch reg, but kept for cargo-cult purposes:
523 * bit0: crtc0?, head A
524 * bit6: lvds, head A
525 * bit7: (only in X), head A
526 */
527 if (nv_crtc->index == 0)
528 regp->CRTC[NV_CIO_CRE_4B] = savep->CRTC[NV_CIO_CRE_4B] | 0x80;
529
530 /* The blob seems to take the current value from crtc 0, add 4 to that
531 * and reuse the old value for crtc 1 */
532 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] = dev_priv->saved_reg.crtc_reg[0].CRTC[NV_CIO_CRE_TVOUT_LATENCY];
533 if (!nv_crtc->index)
534 regp->CRTC[NV_CIO_CRE_TVOUT_LATENCY] += 4;
535
536 /* the blob sometimes sets |= 0x10 (which is the same as setting |=
537 * 1 << 30 on 0x60.830), for no apparent reason */
538 regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
539
540 regp->crtc_830 = mode->crtc_vdisplay - 3;
541 regp->crtc_834 = mode->crtc_vdisplay - 1;
542
543 if (dev_priv->card_type == NV_40)
544 /* This is what the blob does */
545 regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
546
547 if (dev_priv->card_type >= NV_30)
548 regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
549
550 regp->crtc_cfg = NV_PCRTC_CONFIG_START_ADDRESS_HSYNC;
551
552 /* Some misc regs */
553 if (dev_priv->card_type == NV_40) {
554 regp->CRTC[NV_CIO_CRE_85] = 0xFF;
555 regp->CRTC[NV_CIO_CRE_86] = 0x1;
556 }
557
558 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->fb->depth + 1) / 8;
559 /* Enable slaved mode (called MODE_TV in nv4ref.h) */
560 if (lvds_output || tmds_output || tv_output)
561 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7);
562
563 /* Generic PRAMDAC regs */
564
565 if (dev_priv->card_type >= NV_10)
566 /* Only bit that bios and blob set. */
567 regp->nv10_cursync = (1 << 25);
568
569 regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
570 NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL |
571 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
572 if (crtc->fb->depth == 16)
573 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
574 if (dev_priv->chipset >= 0x11)
575 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
576
577 regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
578 regp->tv_setup = 0;
579
580 nv_crtc_set_image_sharpening(crtc, nv_crtc->sharpness);
581
582 /* Some values the blob sets */
583 regp->ramdac_8c0 = 0x100;
584 regp->ramdac_a20 = 0x0;
585 regp->ramdac_a24 = 0xfffff;
586 regp->ramdac_a34 = 0x1;
587}
588
589/**
590 * Sets up registers for the given mode/adjusted_mode pair.
591 *
592 * The clocks, CRTCs and outputs attached to this CRTC must be off.
593 *
594 * This shouldn't enable any clocks, CRTCs, or outputs, but they should
595 * be easily turned on/off after this.
596 */
597static int
598nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
599 struct drm_display_mode *adjusted_mode,
600 int x, int y, struct drm_framebuffer *old_fb)
601{
602 struct drm_device *dev = crtc->dev;
603 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
604 struct drm_nouveau_private *dev_priv = dev->dev_private;
605
606 NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
607 drm_mode_debug_printmodeline(adjusted_mode);
608
609 /* unlock must come after turning off FP_TG_CONTROL in output_prepare */
610 nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
611
612 nv_crtc_mode_set_vga(crtc, adjusted_mode);
613 /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
614 if (dev_priv->card_type == NV_40)
615 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
616 nv_crtc_mode_set_regs(crtc, adjusted_mode);
617 nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
618 return 0;
619}
620
621static void nv_crtc_save(struct drm_crtc *crtc)
622{
623 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
624 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
625 struct nv04_mode_state *state = &dev_priv->mode_reg;
626 struct nv04_crtc_reg *crtc_state = &state->crtc_reg[nv_crtc->index];
627 struct nv04_mode_state *saved = &dev_priv->saved_reg;
628 struct nv04_crtc_reg *crtc_saved = &saved->crtc_reg[nv_crtc->index];
629
630 if (nv_two_heads(crtc->dev))
631 NVSetOwner(crtc->dev, nv_crtc->index);
632
633 nouveau_hw_save_state(crtc->dev, nv_crtc->index, saved);
634
635 /* init some state to saved value */
636 state->sel_clk = saved->sel_clk & ~(0x5 << 16);
637 crtc_state->CRTC[NV_CIO_CRE_LCD__INDEX] = crtc_saved->CRTC[NV_CIO_CRE_LCD__INDEX];
638 state->pllsel = saved->pllsel & ~(PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK);
639 crtc_state->gpio_ext = crtc_saved->gpio_ext;
640}
641
642static void nv_crtc_restore(struct drm_crtc *crtc)
643{
644 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
645 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
646 int head = nv_crtc->index;
647 uint8_t saved_cr21 = dev_priv->saved_reg.crtc_reg[head].CRTC[NV_CIO_CRE_21];
648
649 if (nv_two_heads(crtc->dev))
650 NVSetOwner(crtc->dev, head);
651
652 nouveau_hw_load_state(crtc->dev, head, &dev_priv->saved_reg);
653 nv_lock_vga_crtc_shadow(crtc->dev, head, saved_cr21);
654
655 nv_crtc->last_dpms = NV_DPMS_CLEARED;
656}
657
658static void nv_crtc_prepare(struct drm_crtc *crtc)
659{
660 struct drm_device *dev = crtc->dev;
661 struct drm_nouveau_private *dev_priv = dev->dev_private;
662 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
663 struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
664
665 if (nv_two_heads(dev))
666 NVSetOwner(dev, nv_crtc->index);
667
668 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
669
670 NVBlankScreen(dev, nv_crtc->index, true);
671
672	/* Some more preparation. */
673 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
674 if (dev_priv->card_type == NV_40) {
675 uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
676 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
677 }
678}
679
680static void nv_crtc_commit(struct drm_crtc *crtc)
681{
682 struct drm_device *dev = crtc->dev;
683 struct drm_crtc_helper_funcs *funcs = crtc->helper_private;
684 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
685 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
686
687 nouveau_hw_load_state(dev, nv_crtc->index, &dev_priv->mode_reg);
688 nv04_crtc_mode_set_base(crtc, crtc->x, crtc->y, NULL);
689
690#ifdef __BIG_ENDIAN
691 /* turn on LFB swapping */
692 {
693 uint8_t tmp = NVReadVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR);
694 tmp |= MASK(NV_CIO_CRE_RCR_ENDIAN_BIG);
695 NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RCR, tmp);
696 }
697#endif
698
699 funcs->dpms(crtc, DRM_MODE_DPMS_ON);
700}
701
702static void nv_crtc_destroy(struct drm_crtc *crtc)
703{
704 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
705
706 NV_DEBUG(crtc->dev, "\n");
707
708 if (!nv_crtc)
709 return;
710
711 drm_crtc_cleanup(crtc);
712
713 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
714 kfree(nv_crtc);
715}
716
717static void
718nv_crtc_gamma_load(struct drm_crtc *crtc)
719{
720 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
721 struct drm_device *dev = nv_crtc->base.dev;
722 struct drm_nouveau_private *dev_priv = dev->dev_private;
723 struct rgb { uint8_t r, g, b; } __attribute__((packed)) *rgbs;
724 int i;
725
726 rgbs = (struct rgb *)dev_priv->mode_reg.crtc_reg[nv_crtc->index].DAC;
727 for (i = 0; i < 256; i++) {
728 rgbs[i].r = nv_crtc->lut.r[i] >> 8;
729 rgbs[i].g = nv_crtc->lut.g[i] >> 8;
730 rgbs[i].b = nv_crtc->lut.b[i] >> 8;
731 }
732
733 nouveau_hw_load_state_palette(dev, nv_crtc->index, &dev_priv->mode_reg);
734}
735
736static void
737nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
738{
739 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
740 int i;
741
742 if (size != 256)
743 return;
744
745 for (i = 0; i < 256; i++) {
746 nv_crtc->lut.r[i] = r[i];
747 nv_crtc->lut.g[i] = g[i];
748 nv_crtc->lut.b[i] = b[i];
749 }
750
751 /* We need to know the depth before we upload, but it's possible to
752 * get called before a framebuffer is bound. If this is the case,
753 * mark the lut values as dirty by setting depth==0, and it'll be
754 * uploaded on the first mode_set_base()
755 */
756 if (!nv_crtc->base.fb) {
757 nv_crtc->lut.depth = 0;
758 return;
759 }
760
761 nv_crtc_gamma_load(crtc);
762}
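/* DRM hands the LUT in as 16 bits per channel, while the nv04 DAC
 * palette is 8 bits wide -- hence the >> 8 truncation performed in
 * nv_crtc_gamma_load() above (0xffff maps to 0xff).
 */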
763
764static int
765nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
766 struct drm_framebuffer *old_fb)
767{
768 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
769 struct drm_device *dev = crtc->dev;
770 struct drm_nouveau_private *dev_priv = dev->dev_private;
771 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
772 struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
773 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
774 int arb_burst, arb_lwm;
775 int ret;
776
777 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
778 if (ret)
779 return ret;
780
781 if (old_fb) {
782 struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
783 nouveau_bo_unpin(ofb->nvbo);
784 }
785
786 nv_crtc->fb.offset = fb->nvbo->bo.offset;
787
788 if (nv_crtc->lut.depth != drm_fb->depth) {
789 nv_crtc->lut.depth = drm_fb->depth;
790 nv_crtc_gamma_load(crtc);
791 }
792
793 /* Update the framebuffer format. */
794 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3;
795 regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->fb->depth + 1) / 8;
796 regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
797 if (crtc->fb->depth == 16)
798 regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
799 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX);
800 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
801 regp->ramdac_gen_ctrl);
802
803 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
804 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
805 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
806 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
807 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
808
809 /* Update the framebuffer location. */
810 regp->fb_start = nv_crtc->fb.offset & ~3;
811 regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
812 NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_START, regp->fb_start);
813
814 /* Update the arbitration parameters. */
815 nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel,
816 &arb_burst, &arb_lwm);
817
818 regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst;
819 regp->CRTC[NV_CIO_CRE_FFLWM__INDEX] = arb_lwm & 0xff;
820 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
821 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
822
823 if (dev_priv->card_type >= NV_30) {
824 regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
825 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
826 }
827
828 return 0;
829}
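/* The scanout address programmed above is simply
 *
 *	fb_start = bo offset + y * pitch + x * bytes_per_pixel
 *
 * e.g. panning an XRGB8888 framebuffer with a 4096-byte pitch to
 * (x, y) = (8, 16) adds 16 * 4096 + 8 * 4 = 65568 bytes (numbers
 * chosen purely for illustration).
 */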
830
831static void nv04_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
832 struct nouveau_bo *dst)
833{
834 int width = nv_cursor_width(dev);
835 uint32_t pixel;
836 int i, j;
837
838 for (i = 0; i < width; i++) {
839 for (j = 0; j < width; j++) {
840 pixel = nouveau_bo_rd32(src, i*64 + j);
841
842 nouveau_bo_wr16(dst, i*width + j, (pixel & 0x80000000) >> 16
843 | (pixel & 0xf80000) >> 9
844 | (pixel & 0xf800) >> 6
845 | (pixel & 0xf8) >> 3);
846 }
847 }
848}
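/* The shift soup above converts each ARGB8888 texel to the ARGB1555
 * cursor format used before nv11: bit 31 -> 15, bits 23-19 -> 14-10,
 * bits 15-11 -> 9-5, bits 7-3 -> 4-0.  E.g. the (illustrative) pixel
 * 0xff3c7bde becomes 0x8000 | 0x1c00 | 0x01e0 | 0x001b = 0x9dfb.
 */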
849
850static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
851 struct nouveau_bo *dst)
852{
853 uint32_t pixel;
854 int alpha, i;
855
856 /* nv11+ supports premultiplied (PM), or non-premultiplied (NPM) alpha
857 * cursors (though NPM in combination with fp dithering may not work on
858 * nv11, from "nv" driver history)
859 * NPM mode needs NV_PCRTC_CURSOR_CONFIG_ALPHA_BLEND set and is what the
860 * blob uses, however we get given PM cursors so we use PM mode
861 */
862 for (i = 0; i < 64 * 64; i++) {
863 pixel = nouveau_bo_rd32(src, i);
864
865 /* hw gets unhappy if alpha <= rgb values. for a PM image "less
866 * than" shouldn't happen; fix "equal to" case by adding one to
867 * alpha channel (slightly inaccurate, but so is attempting to
868 * get back to NPM images, due to limits of integer precision)
869 */
870 alpha = pixel >> 24;
871 if (alpha > 0 && alpha < 255)
872 pixel = (pixel & 0x00ffffff) | ((alpha + 1) << 24);
873
874#ifdef __BIG_ENDIAN
875 {
876 struct drm_nouveau_private *dev_priv = dev->dev_private;
877
878 if (dev_priv->chipset == 0x11) {
879 pixel = ((pixel & 0x000000ff) << 24) |
880 ((pixel & 0x0000ff00) << 8) |
881 ((pixel & 0x00ff0000) >> 8) |
882 ((pixel & 0xff000000) >> 24);
883 }
884 }
885#endif
886
887 nouveau_bo_wr32(dst, i, pixel);
888 }
889}
890
891static int
892nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
893 uint32_t buffer_handle, uint32_t width, uint32_t height)
894{
895 struct drm_nouveau_private *dev_priv = crtc->dev->dev_private;
896 struct drm_device *dev = dev_priv->dev;
897 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
898 struct nouveau_bo *cursor = NULL;
899 struct drm_gem_object *gem;
900 int ret = 0;
901
902 if (width != 64 || height != 64)
903 return -EINVAL;
904
905 if (!buffer_handle) {
906 nv_crtc->cursor.hide(nv_crtc, true);
907 return 0;
908 }
909
910 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
911 if (!gem)
912 return -EINVAL;
913 cursor = nouveau_gem_object(gem);
914
915 ret = nouveau_bo_map(cursor);
916 if (ret)
917 goto out;
918
919 if (dev_priv->chipset >= 0x11)
920 nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
921 else
922 nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
923
924 nouveau_bo_unmap(cursor);
925 nv_crtc->cursor.offset = nv_crtc->cursor.nvbo->bo.offset;
926 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
927 nv_crtc->cursor.show(nv_crtc, true);
928out:
929 mutex_lock(&dev->struct_mutex);
930 drm_gem_object_unreference(gem);
931 mutex_unlock(&dev->struct_mutex);
932 return ret;
933}
934
935static int
936nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
937{
938 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
939
940 nv_crtc->cursor.set_pos(nv_crtc, x, y);
941 return 0;
942}
943
944static const struct drm_crtc_funcs nv04_crtc_funcs = {
945 .save = nv_crtc_save,
946 .restore = nv_crtc_restore,
947 .cursor_set = nv04_crtc_cursor_set,
948 .cursor_move = nv04_crtc_cursor_move,
949 .gamma_set = nv_crtc_gamma_set,
950 .set_config = drm_crtc_helper_set_config,
951 .destroy = nv_crtc_destroy,
952};
953
954static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
955 .dpms = nv_crtc_dpms,
956 .prepare = nv_crtc_prepare,
957 .commit = nv_crtc_commit,
958 .mode_fixup = nv_crtc_mode_fixup,
959 .mode_set = nv_crtc_mode_set,
960 .mode_set_base = nv04_crtc_mode_set_base,
961 .load_lut = nv_crtc_gamma_load,
962};
963
964int
965nv04_crtc_create(struct drm_device *dev, int crtc_num)
966{
967 struct nouveau_crtc *nv_crtc;
968 int ret, i;
969
970 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
971 if (!nv_crtc)
972 return -ENOMEM;
973
974 for (i = 0; i < 256; i++) {
975 nv_crtc->lut.r[i] = i << 8;
976 nv_crtc->lut.g[i] = i << 8;
977 nv_crtc->lut.b[i] = i << 8;
978 }
979 nv_crtc->lut.depth = 0;
980
981 nv_crtc->index = crtc_num;
982 nv_crtc->last_dpms = NV_DPMS_CLEARED;
983
984 drm_crtc_init(dev, &nv_crtc->base, &nv04_crtc_funcs);
985 drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
986 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
987
988 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
989 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
990 if (!ret) {
991 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
992 if (!ret)
993 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
994 if (ret)
995 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
996 }
997
998 nv04_cursor_init(nv_crtc);
999
1000 return 0;
1001}
1002
diff --git a/drivers/gpu/drm/nouveau/nv04_cursor.c b/drivers/gpu/drm/nouveau/nv04_cursor.c
new file mode 100644
index 000000000000..89a91b9d8b25
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_cursor.c
@@ -0,0 +1,70 @@
1#include "drmP.h"
2#include "drm_mode.h"
3#include "nouveau_reg.h"
4#include "nouveau_drv.h"
5#include "nouveau_crtc.h"
6#include "nouveau_hw.h"
7
8static void
9nv04_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
10{
11 nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, true);
12}
13
14static void
15nv04_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
16{
17 nv_show_cursor(nv_crtc->base.dev, nv_crtc->index, false);
18}
19
20static void
21nv04_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
22{
23 NVWriteRAMDAC(nv_crtc->base.dev, nv_crtc->index,
24 NV_PRAMDAC_CU_START_POS,
25 XLATE(y, 0, NV_PRAMDAC_CU_START_POS_Y) |
26 XLATE(x, 0, NV_PRAMDAC_CU_START_POS_X));
27}
28
29static void
30crtc_wr_cio_state(struct drm_crtc *crtc, struct nv04_crtc_reg *crtcstate, int index)
31{
32 NVWriteVgaCrtc(crtc->dev, nouveau_crtc(crtc)->index, index,
33 crtcstate->CRTC[index]);
34}
35
36static void
37nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
38{
39 struct drm_device *dev = nv_crtc->base.dev;
40 struct drm_nouveau_private *dev_priv = dev->dev_private;
41 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
42 struct drm_crtc *crtc = &nv_crtc->base;
43
44 regp->CRTC[NV_CIO_CRE_HCUR_ADDR0_INDEX] =
45 MASK(NV_CIO_CRE_HCUR_ASI) |
46 XLATE(offset, 17, NV_CIO_CRE_HCUR_ADDR0_ADR);
47 regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] =
48 XLATE(offset, 11, NV_CIO_CRE_HCUR_ADDR1_ADR);
49 if (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)
50 regp->CRTC[NV_CIO_CRE_HCUR_ADDR1_INDEX] |=
51 MASK(NV_CIO_CRE_HCUR_ADDR1_CUR_DBL);
52 regp->CRTC[NV_CIO_CRE_HCUR_ADDR2_INDEX] = offset >> 24;
53
54 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
55 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
56 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
57 if (dev_priv->card_type == NV_40)
58 nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
59}
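/* Assuming XLATE() relocates the named source bit range into the
 * register field, the cursor address ends up split across the three
 * registers as ADDR0 carrying bits 17 and up, ADDR1 bits 11 and up,
 * and ADDR2 the raw offset >> 24 -- e.g. a cursor at VRAM offset
 * 0x0123f800 writes 0x01 into ADDR2.
 */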
60
61int
62nv04_cursor_init(struct nouveau_crtc *crtc)
63{
64 crtc->cursor.set_offset = nv04_cursor_set_offset;
65 crtc->cursor.set_pos = nv04_cursor_set_pos;
66 crtc->cursor.hide = nv04_cursor_hide;
67 crtc->cursor.show = nv04_cursor_show;
68 return 0;
69}
70
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
new file mode 100644
index 000000000000..a5fa51714e87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -0,0 +1,528 @@
1/*
2 * Copyright 2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 * Copyright 2007-2009 Stuart Bennett
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_encoder.h"
32#include "nouveau_connector.h"
33#include "nouveau_crtc.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37int nv04_dac_output_offset(struct drm_encoder *encoder)
38{
39 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
40 int offset = 0;
41
42 if (dcb->or & (8 | OUTPUT_C))
43 offset += 0x68;
44 if (dcb->or & (8 | OUTPUT_B))
45 offset += 0x2000;
46
47 return offset;
48}
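/* Assuming OUTPUT_B and OUTPUT_C here are the per-output bits of
 * dcb->or, a DAC routed to output C has its PRAMDAC registers at
 * base + 0x68, one on output B at base + 0x2000, and both bits set
 * would give base + 0x2068; nv17_dac_detect() below adds this
 * regoffset to every PRAMDAC access it makes.
 */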
49
50/*
51 * arbitrary limit to number of sense oscillations tolerated in one sample
52 * period (observed to be at least 13 in "nvidia")
53 */
54#define MAX_HBLANK_OSC 20
55
56/*
57 * arbitrary limit to number of conflicting sample pairs to tolerate at a
58 * voltage step (observed to be at least 5 in "nvidia")
59 */
60#define MAX_SAMPLE_PAIRS 10
61
62static int sample_load_twice(struct drm_device *dev, bool sense[2])
63{
64 int i;
65
66 for (i = 0; i < 2; i++) {
67 bool sense_a, sense_b, sense_b_prime;
68 int j = 0;
69
70 /*
71 * wait for bit 0 clear -- out of hblank -- (say reg value 0x4),
72 * then wait for transition 0x4->0x5->0x4: enter hblank, leave
73 * hblank again
74 * use a 10ms timeout (guards against crtc being inactive, in
75 * which case blank state would never change)
76 */
77 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
78 0x00000001, 0x00000000))
79 return -EBUSY;
80 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
81 0x00000001, 0x00000001))
82 return -EBUSY;
83 if (!nouveau_wait_until(dev, 10000000, NV_PRMCIO_INP0__COLOR,
84 0x00000001, 0x00000000))
85 return -EBUSY;
86
87 udelay(100);
88 /* when level triggers, sense is _LO_ */
89 sense_a = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
90
91 /* take another reading until it agrees with sense_a... */
92 do {
93 udelay(100);
94 sense_b = nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
95 if (sense_a != sense_b) {
96 sense_b_prime =
97 nv_rd08(dev, NV_PRMCIO_INP0) & 0x10;
98 if (sense_b == sense_b_prime) {
99 /* ... unless two consecutive subsequent
100 * samples agree; sense_a is replaced */
101 sense_a = sense_b;
102	/* force a mismatch so we loop */
103 sense_b = !sense_a;
104 }
105 }
106 } while ((sense_a != sense_b) && ++j < MAX_HBLANK_OSC);
107
108 if (j == MAX_HBLANK_OSC)
109 /* with so much oscillation, default to sense:LO */
110 sense[i] = false;
111 else
112 sense[i] = sense_a;
113 }
114
115 return 0;
116}
117
118static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
119 struct drm_connector *connector)
120{
121 struct drm_device *dev = encoder->dev;
122 uint8_t saved_seq1, saved_pi, saved_rpc1;
123 uint8_t saved_palette0[3], saved_palette_mask;
124 uint32_t saved_rtest_ctrl, saved_rgen_ctrl;
125 int i;
126 uint8_t blue;
127 bool sense = true;
128
129 /*
130 * for this detection to work, there needs to be a mode set up on the
131 * CRTC. this is presumed to be the case
132 */
133
134 if (nv_two_heads(dev))
135 /* only implemented for head A for now */
136 NVSetOwner(dev, 0);
137
138 saved_seq1 = NVReadVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX);
139 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1 & ~0x20);
140
141 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL);
142 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL,
143 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
144
145 msleep(10);
146
147 saved_pi = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX);
148 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX,
149 saved_pi & ~(0x80 | MASK(NV_CIO_CRE_PIXEL_FORMAT)));
150 saved_rpc1 = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX);
151 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1 & ~0xc0);
152
153 nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS, 0x0);
154 for (i = 0; i < 3; i++)
155 saved_palette0[i] = nv_rd08(dev, NV_PRMDIO_PALETTE_DATA);
156 saved_palette_mask = nv_rd08(dev, NV_PRMDIO_PIXEL_MASK);
157 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, 0);
158
159 saved_rgen_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL);
160 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL,
161 (saved_rgen_ctrl & ~(NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS |
162 NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM)) |
163 NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON);
164
165 blue = 8; /* start of test range */
166
167 do {
168 bool sense_pair[2];
169
170 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
171 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
172 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, 0);
173 /* testing blue won't find monochrome monitors. I don't care */
174 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, blue);
175
176 i = 0;
177 /* take sample pairs until both samples in the pair agree */
178 do {
179 if (sample_load_twice(dev, sense_pair))
180 goto out;
181 } while ((sense_pair[0] != sense_pair[1]) &&
182 ++i < MAX_SAMPLE_PAIRS);
183
184 if (i == MAX_SAMPLE_PAIRS)
185 /* too much oscillation defaults to LO */
186 sense = false;
187 else
188 sense = sense_pair[0];
189
190 /*
191 * if sense goes LO before blue ramps to 0x18, monitor is not connected.
192 * ergo, if blue gets to 0x18, monitor must be connected
193 */
194 } while (++blue < 0x18 && sense);
195
196out:
197 nv_wr08(dev, NV_PRMDIO_PIXEL_MASK, saved_palette_mask);
198 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_GENERAL_CONTROL, saved_rgen_ctrl);
199 nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS, 0);
200 for (i = 0; i < 3; i++)
201 nv_wr08(dev, NV_PRMDIO_PALETTE_DATA, saved_palette0[i]);
202 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL, saved_rtest_ctrl);
203 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX, saved_pi);
204 NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_RPC1_INDEX, saved_rpc1);
205 NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);
206
207 if (blue == 0x18) {
208 NV_TRACE(dev, "Load detected on head A\n");
209 return connector_status_connected;
210 }
211
212 return connector_status_disconnected;
213}
214
215enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
216 struct drm_connector *connector)
217{
218 struct drm_device *dev = encoder->dev;
219 struct drm_nouveau_private *dev_priv = dev->dev_private;
220 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
221 uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
222 uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
223 saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
224 int head, present = 0;
225
226#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
227 if (dcb->type == OUTPUT_TV) {
228 testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
229
230 if (dev_priv->vbios->tvdactestval)
231 testval = dev_priv->vbios->tvdactestval;
232 } else {
233 testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
234
235 if (dev_priv->vbios->dactestval)
236 testval = dev_priv->vbios->dactestval;
237 }
238
239 saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
240 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset,
241 saved_rtest_ctrl & ~NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF);
242
243 saved_powerctrl_2 = nvReadMC(dev, NV_PBUS_POWERCTRL_2);
244
245 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2 & 0xd7ffffff);
246 if (regoffset == 0x68) {
247 saved_powerctrl_4 = nvReadMC(dev, NV_PBUS_POWERCTRL_4);
248 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
249 }
250
251 saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
252 saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
253
254 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
255 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
256
257 msleep(4);
258
259 saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
260 head = (saved_routput & 0x100) >> 8;
261#if 0
262 /* if there's a spare crtc, using it will minimise flicker for the case
263 * where the in-use crtc is in use by an off-chip tmds encoder */
264 if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
265 head ^= 1;
266#endif
267 /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
268 routput = (saved_routput & 0xfffffece) | head << 8;
269
270 if (dev_priv->card_type >= NV_40) {
271 if (dcb->type == OUTPUT_TV)
272 routput |= 0x1a << 16;
273 else
274 routput &= ~(0x1a << 16);
275 }
276
277 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, routput);
278 msleep(1);
279
280 temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
281 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, temp | 1);
282
283 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA,
284 NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK | testval);
285 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
286 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
287 temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
288 msleep(5);
289
290 temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
291
292 if (dcb->type == OUTPUT_TV)
293 present = (nv17_tv_detect(encoder, connector, temp)
294 == connector_status_connected);
295 else
296 present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
297
298 temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
299 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
300 temp & ~NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
301 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TESTPOINT_DATA, 0);
302
303 /* bios does something more complex for restoring, but I think this is good enough */
304 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, saved_routput);
305 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, saved_rtest_ctrl);
306 if (regoffset == 0x68)
307 nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
308 nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
309
310 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
311 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
312
313 if (present) {
314 NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
315 return connector_status_connected;
316 }
317
318 return connector_status_disconnected;
319}
320
321
322static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
323 struct drm_display_mode *mode,
324 struct drm_display_mode *adjusted_mode)
325{
326 return true;
327}
328
329static void nv04_dac_prepare(struct drm_encoder *encoder)
330{
331 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
332 struct drm_device *dev = encoder->dev;
333 struct drm_nouveau_private *dev_priv = dev->dev_private;
334 int head = nouveau_crtc(encoder->crtc)->index;
335 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
336
337 helper->dpms(encoder, DRM_MODE_DPMS_OFF);
338
339 nv04_dfp_disable(dev, head);
340
341 /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
342 * at LCD__INDEX which we don't alter
343 */
344 if (!(crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] & 0x44))
345 crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
346}
347
348
349static void nv04_dac_mode_set(struct drm_encoder *encoder,
350 struct drm_display_mode *mode,
351 struct drm_display_mode *adjusted_mode)
352{
353 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
354 struct drm_device *dev = encoder->dev;
355 struct drm_nouveau_private *dev_priv = dev->dev_private;
356 int head = nouveau_crtc(encoder->crtc)->index;
357
358 NV_TRACE(dev, "%s called for encoder %d\n", __func__,
359 nv_encoder->dcb->index);
360
361 if (nv_gf4_disp_arch(dev)) {
362 struct drm_encoder *rebind;
363 uint32_t dac_offset = nv04_dac_output_offset(encoder);
364 uint32_t otherdac;
365
366 /* bit 16-19 are bits that are set on some G70 cards,
367 * but don't seem to have much effect */
368 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
369 head << 8 | NV_PRAMDAC_DACCLK_SEL_DACCLK);
370 /* force any other vga encoders to bind to the other crtc */
371 list_for_each_entry(rebind, &dev->mode_config.encoder_list, head) {
372 if (rebind == encoder
373 || nouveau_encoder(rebind)->dcb->type != OUTPUT_ANALOG)
374 continue;
375
376 dac_offset = nv04_dac_output_offset(rebind);
377 otherdac = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset);
378 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + dac_offset,
379 (otherdac & ~0x0100) | (head ^ 1) << 8);
380 }
381 }
382
383 /* This could use refinement for flatpanels, but it should work this way */
384 if (dev_priv->chipset < 0x44)
385 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
386 else
387 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
388}
389
390static void nv04_dac_commit(struct drm_encoder *encoder)
391{
392 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
393 struct drm_device *dev = encoder->dev;
394 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
395 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
396
397 helper->dpms(encoder, DRM_MODE_DPMS_ON);
398
399 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
400 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
401 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
402}
403
404void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
405{
406 struct drm_device *dev = encoder->dev;
407 struct drm_nouveau_private *dev_priv = dev->dev_private;
408 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
409
410 if (nv_gf4_disp_arch(dev)) {
411 uint32_t *dac_users = &dev_priv->dac_users[ffs(dcb->or) - 1];
412 int dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder);
413 uint32_t dacclk = NVReadRAMDAC(dev, 0, dacclk_off);
414
415 if (enable) {
416 *dac_users |= 1 << dcb->index;
417 NVWriteRAMDAC(dev, 0, dacclk_off, dacclk | NV_PRAMDAC_DACCLK_SEL_DACCLK);
418
419 } else {
420 *dac_users &= ~(1 << dcb->index);
421 if (!*dac_users)
422 NVWriteRAMDAC(dev, 0, dacclk_off,
423 dacclk & ~NV_PRAMDAC_DACCLK_SEL_DACCLK);
424 }
425 }
426}
427
428static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
429{
430 struct drm_device *dev = encoder->dev;
431 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
432
433 if (nv_encoder->last_dpms == mode)
434 return;
435 nv_encoder->last_dpms = mode;
436
437 NV_INFO(dev, "Setting dpms mode %d on vga encoder (output %d)\n",
438 mode, nv_encoder->dcb->index);
439
440 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
441}
442
443static void nv04_dac_save(struct drm_encoder *encoder)
444{
445 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
446 struct drm_device *dev = encoder->dev;
447
448 if (nv_gf4_disp_arch(dev))
449 nv_encoder->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
450 nv04_dac_output_offset(encoder));
451}
452
453static void nv04_dac_restore(struct drm_encoder *encoder)
454{
455 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
456 struct drm_device *dev = encoder->dev;
457
458 if (nv_gf4_disp_arch(dev))
459 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder),
460 nv_encoder->restore.output);
461
462 nv_encoder->last_dpms = NV_DPMS_CLEARED;
463}
464
465static void nv04_dac_destroy(struct drm_encoder *encoder)
466{
467 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
468
469 NV_DEBUG(encoder->dev, "\n");
470
471 drm_encoder_cleanup(encoder);
472 kfree(nv_encoder);
473}
474
475static const struct drm_encoder_helper_funcs nv04_dac_helper_funcs = {
476 .dpms = nv04_dac_dpms,
477 .save = nv04_dac_save,
478 .restore = nv04_dac_restore,
479 .mode_fixup = nv04_dac_mode_fixup,
480 .prepare = nv04_dac_prepare,
481 .commit = nv04_dac_commit,
482 .mode_set = nv04_dac_mode_set,
483 .detect = nv04_dac_detect
484};
485
486static const struct drm_encoder_helper_funcs nv17_dac_helper_funcs = {
487 .dpms = nv04_dac_dpms,
488 .save = nv04_dac_save,
489 .restore = nv04_dac_restore,
490 .mode_fixup = nv04_dac_mode_fixup,
491 .prepare = nv04_dac_prepare,
492 .commit = nv04_dac_commit,
493 .mode_set = nv04_dac_mode_set,
494 .detect = nv17_dac_detect
495};
496
497static const struct drm_encoder_funcs nv04_dac_funcs = {
498 .destroy = nv04_dac_destroy,
499};
500
501int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
502{
503 const struct drm_encoder_helper_funcs *helper;
504 struct drm_encoder *encoder;
505 struct nouveau_encoder *nv_encoder = NULL;
506
507 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
508 if (!nv_encoder)
509 return -ENOMEM;
510
511 encoder = to_drm_encoder(nv_encoder);
512
513 nv_encoder->dcb = entry;
514 nv_encoder->or = ffs(entry->or) - 1;
515
516 if (nv_gf4_disp_arch(dev))
517 helper = &nv17_dac_helper_funcs;
518 else
519 helper = &nv04_dac_helper_funcs;
520
521 drm_encoder_init(dev, encoder, &nv04_dac_funcs, DRM_MODE_ENCODER_DAC);
522 drm_encoder_helper_add(encoder, helper);
523
524 encoder->possible_crtcs = entry->heads;
525 encoder->possible_clones = 0;
526
527 return 0;
528}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
new file mode 100644
index 000000000000..e5b33339d595
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -0,0 +1,621 @@
1/*
2 * Copyright 2003 NVIDIA, Corporation
3 * Copyright 2006 Dave Airlie
4 * Copyright 2007 Maarten Maathuis
5 * Copyright 2007-2009 Stuart Bennett
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
11 * and/or sell copies of the Software, and to permit persons to whom the
12 * Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
23 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
24 * DEALINGS IN THE SOFTWARE.
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#include "nouveau_drv.h"
31#include "nouveau_encoder.h"
32#include "nouveau_connector.h"
33#include "nouveau_crtc.h"
34#include "nouveau_hw.h"
35#include "nvreg.h"
36
37#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
38 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
39 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
40#define FP_TG_CONTROL_OFF (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE | \
41 NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE | \
42 NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE)
43
44static inline bool is_fpc_off(uint32_t fpc)
45{
46 return ((fpc & (FP_TG_CONTROL_ON | FP_TG_CONTROL_OFF)) ==
47 FP_TG_CONTROL_OFF);
48}
49
50int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent)
51{
52 /* special case of nv_read_tmds to find the crtc associated with an output.
53 * this does not give a correct answer for off-chip dvi, but there's no
54 * use for such an answer anyway
55 */
56 int ramdac = (dcbent->or & OUTPUT_C) >> 2;
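/* (an output with OUTPUT_C in its or mask sits on the second ramdac,
 * hence the shift above; editor's reading of the or bitmask) */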
57
58 NVWriteRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_CONTROL,
59 NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE | 0x4);
60 return ((NVReadRAMDAC(dev, ramdac, NV_PRAMDAC_FP_TMDS_DATA) & 0x8) >> 3) ^ ramdac;
61}
62
63void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
64 int head, bool dl)
65{
66 /* The BIOS scripts don't do this for us, sadly.
67 * Luckily, we do know the values ;-)
68 *
69 * head < 0 indicates we wish to force a setting with the overrideval
70 * (for VT restore etc.)
71 */
72
73 int ramdac = (dcbent->or & OUTPUT_C) >> 2;
74 uint8_t tmds04 = 0x80;
75
76 if (head != ramdac)
77 tmds04 = 0x88;
78
79 if (dcbent->type == OUTPUT_LVDS)
80 tmds04 |= 0x01;
81
82 nv_write_tmds(dev, dcbent->or, 0, 0x04, tmds04);
83
84 if (dl) /* dual link */
85 nv_write_tmds(dev, dcbent->or, 1, 0x04, tmds04 ^ 0x08);
86}
87
88void nv04_dfp_disable(struct drm_device *dev, int head)
89{
90 struct drm_nouveau_private *dev_priv = dev->dev_private;
91 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
92
93 if (NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL) &
94 FP_TG_CONTROL_ON) {
95 /* digital remnants must be cleaned up before new crtc
96 * values are programmed. the delay gives the vga circuitry
97 * time to realise it's in control again
98 */
99 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
100 FP_TG_CONTROL_OFF);
101 msleep(50);
102 }
103 /* don't inadvertently turn it on when the state is written later */
104 crtcstate[head].fp_control = FP_TG_CONTROL_OFF;
105}
106
107void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
108{
109 struct drm_device *dev = encoder->dev;
110 struct drm_nouveau_private *dev_priv = dev->dev_private;
111 struct drm_crtc *crtc;
112 struct nouveau_crtc *nv_crtc;
113 uint32_t *fpc;
114
115 if (mode == DRM_MODE_DPMS_ON) {
116 nv_crtc = nouveau_crtc(encoder->crtc);
117 fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
118
119 if (is_fpc_off(*fpc)) {
120 /* using the saved value is ok, as (is_digital && dpms_on &&
121 * fp_control==OFF) is (at present) *only* true when
122 * fpc's most recent change was made by the "off" code below
123 */
124 *fpc = nv_crtc->dpms_saved_fp_control;
125 }
126
127 nv_crtc->fp_users |= 1 << nouveau_encoder(encoder)->dcb->index;
128 NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_FP_TG_CONTROL, *fpc);
129 } else {
130 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
131 nv_crtc = nouveau_crtc(crtc);
132 fpc = &dev_priv->mode_reg.crtc_reg[nv_crtc->index].fp_control;
133
134 nv_crtc->fp_users &= ~(1 << nouveau_encoder(encoder)->dcb->index);
135 if (!is_fpc_off(*fpc) && !nv_crtc->fp_users) {
136 nv_crtc->dpms_saved_fp_control = *fpc;
137 /* cut the FP output */
138 *fpc &= ~FP_TG_CONTROL_ON;
139 *fpc |= FP_TG_CONTROL_OFF;
140 NVWriteRAMDAC(dev, nv_crtc->index,
141 NV_PRAMDAC_FP_TG_CONTROL, *fpc);
142 }
143 }
144 }
145}
146
147static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
148 struct drm_display_mode *mode,
149 struct drm_display_mode *adjusted_mode)
150{
151 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
152 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
153
154 /* For internal panels and gpu scaling on DVI we need the native mode */
155 if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
156 if (!nv_connector->native_mode)
157 return false;
158 nv_encoder->mode = *nv_connector->native_mode;
159 adjusted_mode->clock = nv_connector->native_mode->clock;
160 } else {
161 nv_encoder->mode = *adjusted_mode;
162 }
163
164 return true;
165}
166
167static void nv04_dfp_prepare_sel_clk(struct drm_device *dev,
168 struct nouveau_encoder *nv_encoder, int head)
169{
170 struct drm_nouveau_private *dev_priv = dev->dev_private;
171 struct nv04_mode_state *state = &dev_priv->mode_reg;
172 uint32_t bits1618 = nv_encoder->dcb->or & OUTPUT_A ? 0x10000 : 0x40000;
173
174 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP)
175 return;
176
177 /* SEL_CLK is only used on the primary ramdac.
178 * It toggles spread spectrum PLL output and sets the bindings of PLLs
179 * to heads on digital outputs
180 */
181 if (head)
182 state->sel_clk |= bits1618;
183 else
184 state->sel_clk &= ~bits1618;
185
186 /* nv30:
187 * bit 0 NVClk spread spectrum on/off
188 * bit 2 MemClk spread spectrum on/off
189 * bit 4 PixClk1 spread spectrum on/off toggle
190 * bit 6 PixClk2 spread spectrum on/off toggle
191 *
192 * nv40 (observations from bios behaviour and mmio traces):
193 * bits 4&6 as for nv30
194 * bits 5&7 head dependent as for bits 4&6, but do not appear with 4&6;
195 * maybe a different spread mode
196 * bits 8&10 seen on dual-link dvi outputs, purpose unknown (set by POST scripts)
197 * The logic behind turning spread spectrum on/off in the first place,
198 * and which bit-pair to use, is unclear on nv40 (for earlier cards, the fp table
199 * entry has the necessary info)
200 */
201 if (nv_encoder->dcb->type == OUTPUT_LVDS && dev_priv->saved_reg.sel_clk & 0xf0) {
202 int shift = (dev_priv->saved_reg.sel_clk & 0x50) ? 0 : 1;
203
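/* stick with whichever bit pair the pre-load state used: 0x50 set
 * in the saved sel_clk means bits 4/6 (shift 0), otherwise the
 * alternate pair at bits 5/7 (shift 1) is selected */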
204 state->sel_clk &= ~0xf0;
205 state->sel_clk |= (head ? 0x40 : 0x10) << shift;
206 }
207}
208
209static void nv04_dfp_prepare(struct drm_encoder *encoder)
210{
211 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
212 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
213 struct drm_device *dev = encoder->dev;
214 struct drm_nouveau_private *dev_priv = dev->dev_private;
215 int head = nouveau_crtc(encoder->crtc)->index;
216 struct nv04_crtc_reg *crtcstate = dev_priv->mode_reg.crtc_reg;
217 uint8_t *cr_lcd = &crtcstate[head].CRTC[NV_CIO_CRE_LCD__INDEX];
218 uint8_t *cr_lcd_oth = &crtcstate[head ^ 1].CRTC[NV_CIO_CRE_LCD__INDEX];
219
220 helper->dpms(encoder, DRM_MODE_DPMS_OFF);
221
222 nv04_dfp_prepare_sel_clk(dev, nv_encoder, head);
223
224 /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
225 * at LCD__INDEX, which we don't alter
226 */
227 if (!(*cr_lcd & 0x44)) {
228 *cr_lcd = 0x3;
229
230 if (nv_two_heads(dev)) {
231 if (nv_encoder->dcb->location == DCB_LOC_ON_CHIP)
232 *cr_lcd |= head ? 0x0 : 0x8;
233 else {
234 *cr_lcd |= (nv_encoder->dcb->or << 4) & 0x30;
235 if (nv_encoder->dcb->type == OUTPUT_LVDS)
236 *cr_lcd |= 0x30;
237 if ((*cr_lcd & 0x30) == (*cr_lcd_oth & 0x30)) {
238 /* avoid being connected to both crtcs */
239 *cr_lcd_oth &= ~0x30;
240 NVWriteVgaCrtc(dev, head ^ 1,
241 NV_CIO_CRE_LCD__INDEX,
242 *cr_lcd_oth);
243 }
244 }
245 }
246 }
247}
248
249
250static void nv04_dfp_mode_set(struct drm_encoder *encoder,
251 struct drm_display_mode *mode,
252 struct drm_display_mode *adjusted_mode)
253{
254 struct drm_device *dev = encoder->dev;
255 struct drm_nouveau_private *dev_priv = dev->dev_private;
256 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
257 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
258 struct nv04_crtc_reg *savep = &dev_priv->saved_reg.crtc_reg[nv_crtc->index];
259 struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
260 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
261 struct drm_display_mode *output_mode = &nv_encoder->mode;
262 uint32_t mode_ratio, panel_ratio;
263
264 NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
265 drm_mode_debug_printmodeline(output_mode);
266
267 /* Initialize the FP registers in this CRTC. */
268 regp->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
269 regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
270 if (!nv_gf4_disp_arch(dev) ||
271 (output_mode->hsync_start - output_mode->hdisplay) >=
272 dev_priv->vbios->digital_min_front_porch)
273 regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
274 else
275 regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
276 regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
277 regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
278 regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
279 regp->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - 1;
280
281 regp->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
282 regp->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
283 regp->fp_vert_regs[FP_CRTC] = output_mode->vtotal - 5 - 1;
284 regp->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1;
285 regp->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
286 regp->fp_vert_regs[FP_VALID_START] = 0;
287 regp->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - 1;
288
289 /* bit 26: a bit seen on some g7x, with no as yet discernible purpose */
290 regp->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
291 (savep->fp_control & (1 << 26 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG));
292 /* Deal with vsync/hsync polarity */
293 /* LVDS screens do set this, but modes with +ve syncs are very rare */
294 if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
295 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
296 if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
297 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
298 /* panel scaling first, as native would get set otherwise */
299 if (nv_connector->scaling_mode == DRM_MODE_SCALE_NONE ||
300 nv_connector->scaling_mode == DRM_MODE_SCALE_CENTER) /* panel handles it */
301 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER;
302 else if (adjusted_mode->hdisplay == output_mode->hdisplay &&
303 adjusted_mode->vdisplay == output_mode->vdisplay) /* native mode */
304 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE;
305 else /* gpu needs to scale */
306 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE;
307 if (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT)
308 regp->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
309 if (nv_encoder->dcb->location != DCB_LOC_ON_CHIP &&
310 output_mode->clock > 165000)
311 regp->fp_control |= (2 << 24);
312 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
313 bool duallink, dummy;
314
315 nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode->
316 clock, &duallink, &dummy);
317 if (duallink)
318 regp->fp_control |= (8 << 28);
319 } else
320 if (output_mode->clock > 165000)
321 regp->fp_control |= (8 << 28);
322
323 regp->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
324 NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
325 NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
326 NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
327 NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
328 NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
329 NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
330
331 /* We want automatic scaling */
332 regp->fp_debug_1 = 0;
333 /* This can override HTOTAL and VTOTAL */
334 regp->fp_debug_2 = 0;
335
336 /* Use 20.12 fixed point format to avoid floats */
337 mode_ratio = (1 << 12) * adjusted_mode->hdisplay / adjusted_mode->vdisplay;
338 panel_ratio = (1 << 12) * output_mode->hdisplay / output_mode->vdisplay;
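/* worked example: a 1024x768 mode on a 1280x1024 panel gives
 * mode_ratio = 4096 * 1024 / 768 = 5461 and
 * panel_ratio = 4096 * 1280 / 1024 = 5120; mode_ratio > panel_ratio,
 * so below the vertical axis is scaled and the valid vertical area
 * shrunk to preserve aspect */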
339 /* if ratios are equal, SCALE_ASPECT will automatically (and correctly)
340 * get treated the same as SCALE_FULLSCREEN */
341 if (nv_connector->scaling_mode == DRM_MODE_SCALE_ASPECT &&
342 mode_ratio != panel_ratio) {
343 uint32_t diff, scale;
344 bool divide_by_2 = nv_gf4_disp_arch(dev);
345
346 if (mode_ratio < panel_ratio) {
347 /* vertical needs to expand to glass size (automatic)
348 * horizontal needs to be scaled at vertical scale factor
349 * to maintain aspect */
350
351 scale = (1 << 12) * adjusted_mode->vdisplay / output_mode->vdisplay;
352 regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
353 XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
354
355 /* restrict area of screen used, horizontally */
356 diff = output_mode->hdisplay -
357 output_mode->vdisplay * mode_ratio / (1 << 12);
358 regp->fp_horiz_regs[FP_VALID_START] += diff / 2;
359 regp->fp_horiz_regs[FP_VALID_END] -= diff / 2;
360 }
361
362 if (mode_ratio > panel_ratio) {
363 /* horizontal needs to expand to glass size (automatic)
364 * vertical needs to be scaled at horizontal scale factor
365 * to maintain aspect */
366
367 scale = (1 << 12) * adjusted_mode->hdisplay / output_mode->hdisplay;
368 regp->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
369 XLATE(scale, divide_by_2, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE);
370
371 /* restrict area of screen used, vertically */
372 diff = output_mode->vdisplay -
373 (1 << 12) * output_mode->hdisplay / mode_ratio;
374 regp->fp_vert_regs[FP_VALID_START] += diff / 2;
375 regp->fp_vert_regs[FP_VALID_END] -= diff / 2;
376 }
377 }
378
379 /* Output property. */
380 if (nv_connector->use_dithering) {
381 if (dev_priv->chipset == 0x11)
382 regp->dither = savep->dither | 0x00010000;
383 else {
384 int i;
385 regp->dither = savep->dither | 0x00000001;
386 for (i = 0; i < 3; i++) {
387 regp->dither_regs[i] = 0xe4e4e4e4;
388 regp->dither_regs[i + 3] = 0x44444444;
389 }
390 }
391 } else {
392 if (dev_priv->chipset != 0x11) {
393 /* reset them */
394 int i;
395 for (i = 0; i < 3; i++) {
396 regp->dither_regs[i] = savep->dither_regs[i];
397 regp->dither_regs[i + 3] = savep->dither_regs[i + 3];
398 }
399 }
400 regp->dither = savep->dither;
401 }
402
403 regp->fp_margin_color = 0;
404}
405
406static void nv04_dfp_commit(struct drm_encoder *encoder)
407{
408 struct drm_device *dev = encoder->dev;
409 struct drm_nouveau_private *dev_priv = dev->dev_private;
410 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
411 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
412 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
413 struct dcb_entry *dcbe = nv_encoder->dcb;
414 int head = nouveau_crtc(encoder->crtc)->index;
415
416 NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);
417
418 if (dcbe->type == OUTPUT_TMDS)
419 run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
420 else if (dcbe->type == OUTPUT_LVDS)
421 call_lvds_script(dev, dcbe, head, LVDS_RESET, nv_encoder->mode.clock);
422
423 /* update fp_control state for any changes made by scripts,
424 * so correct value is written at DPMS on */
425 dev_priv->mode_reg.crtc_reg[head].fp_control =
426 NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
427
428 /* This could use refinement for flatpanels, but it should work this way */
429 if (dev_priv->chipset < 0x44)
430 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
431 else
432 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
433
434 helper->dpms(encoder, DRM_MODE_DPMS_ON);
435
436 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
437 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
438 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
439}
440
441static inline bool is_powersaving_dpms(int mode)
442{
443 return (mode != DRM_MODE_DPMS_ON);
444}
445
446static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
447{
448 struct drm_device *dev = encoder->dev;
449 struct drm_crtc *crtc = encoder->crtc;
450 struct drm_nouveau_private *dev_priv = dev->dev_private;
451 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
452 bool was_powersaving = is_powersaving_dpms(nv_encoder->last_dpms);
453
454 if (nv_encoder->last_dpms == mode)
455 return;
456 nv_encoder->last_dpms = mode;
457
458 NV_INFO(dev, "Setting dpms mode %d on lvds encoder (output %d)\n",
459 mode, nv_encoder->dcb->index);
460
461 if (was_powersaving && is_powersaving_dpms(mode))
462 return;
463
464 if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
465 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
466
467 /* when removing an output, crtc may not be set, but PANEL_OFF
468 * must still be run
469 */
470 int head = crtc ? nouveau_crtc(crtc)->index :
471 nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
472
473 if (mode == DRM_MODE_DPMS_ON) {
474 if (!nv_connector->native_mode) {
475 NV_ERROR(dev, "Not turning on LVDS without native mode\n");
476 return;
477 }
478 call_lvds_script(dev, nv_encoder->dcb, head,
479 LVDS_PANEL_ON, nv_connector->native_mode->clock);
480 } else
481 /* pxclk of 0 is fine for PANEL_OFF, and for a
482 * disconnected LVDS encoder there is no native_mode
483 */
484 call_lvds_script(dev, nv_encoder->dcb, head,
485 LVDS_PANEL_OFF, 0);
486 }
487
488 nv04_dfp_update_fp_control(encoder, mode);
489
490 if (mode == DRM_MODE_DPMS_ON)
491 nv04_dfp_prepare_sel_clk(dev, nv_encoder, nouveau_crtc(crtc)->index);
492 else {
493 dev_priv->mode_reg.sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
494 dev_priv->mode_reg.sel_clk &= ~0xf0;
495 }
496 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, dev_priv->mode_reg.sel_clk);
497}
498
499static void nv04_tmds_dpms(struct drm_encoder *encoder, int mode)
500{
501 struct drm_device *dev = encoder->dev;
502 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
503
504 if (nv_encoder->last_dpms == mode)
505 return;
506 nv_encoder->last_dpms = mode;
507
508 NV_INFO(dev, "Setting dpms mode %d on tmds encoder (output %d)\n",
509 mode, nv_encoder->dcb->index);
510
511 nv04_dfp_update_fp_control(encoder, mode);
512}
513
514static void nv04_dfp_save(struct drm_encoder *encoder)
515{
516 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
517 struct drm_device *dev = encoder->dev;
518
519 if (nv_two_heads(dev))
520 nv_encoder->restore.head =
521 nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
522}
523
524static void nv04_dfp_restore(struct drm_encoder *encoder)
525{
526 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
527 struct drm_device *dev = encoder->dev;
528 struct drm_nouveau_private *dev_priv = dev->dev_private;
529 int head = nv_encoder->restore.head;
530
531 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
532 struct drm_display_mode *native_mode = nouveau_encoder_connector_get(nv_encoder)->native_mode;
533 if (native_mode)
534 call_lvds_script(dev, nv_encoder->dcb, head, LVDS_PANEL_ON,
535 native_mode->clock);
536 else
537 NV_ERROR(dev, "Not restoring LVDS without native mode\n");
538
539 } else if (nv_encoder->dcb->type == OUTPUT_TMDS) {
540 int clock = nouveau_hw_pllvals_to_clk
541 (&dev_priv->saved_reg.crtc_reg[head].pllvals);
542
543 run_tmds_table(dev, nv_encoder->dcb, head, clock);
544 }
545
546 nv_encoder->last_dpms = NV_DPMS_CLEARED;
547}
548
549static void nv04_dfp_destroy(struct drm_encoder *encoder)
550{
551 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
552
553 NV_DEBUG(encoder->dev, "\n");
554
555 drm_encoder_cleanup(encoder);
556 kfree(nv_encoder);
557}
558
559static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
560 .dpms = nv04_lvds_dpms,
561 .save = nv04_dfp_save,
562 .restore = nv04_dfp_restore,
563 .mode_fixup = nv04_dfp_mode_fixup,
564 .prepare = nv04_dfp_prepare,
565 .commit = nv04_dfp_commit,
566 .mode_set = nv04_dfp_mode_set,
567 .detect = NULL,
568};
569
570static const struct drm_encoder_helper_funcs nv04_tmds_helper_funcs = {
571 .dpms = nv04_tmds_dpms,
572 .save = nv04_dfp_save,
573 .restore = nv04_dfp_restore,
574 .mode_fixup = nv04_dfp_mode_fixup,
575 .prepare = nv04_dfp_prepare,
576 .commit = nv04_dfp_commit,
577 .mode_set = nv04_dfp_mode_set,
578 .detect = NULL,
579};
580
581static const struct drm_encoder_funcs nv04_dfp_funcs = {
582 .destroy = nv04_dfp_destroy,
583};
584
585int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
586{
587 const struct drm_encoder_helper_funcs *helper;
588 struct drm_encoder *encoder;
589 struct nouveau_encoder *nv_encoder = NULL;
590 int type;
591
592 switch (entry->type) {
593 case OUTPUT_TMDS:
594 type = DRM_MODE_ENCODER_TMDS;
595 helper = &nv04_tmds_helper_funcs;
596 break;
597 case OUTPUT_LVDS:
598 type = DRM_MODE_ENCODER_LVDS;
599 helper = &nv04_lvds_helper_funcs;
600 break;
601 default:
602 return -EINVAL;
603 }
604
605 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
606 if (!nv_encoder)
607 return -ENOMEM;
608
609 encoder = to_drm_encoder(nv_encoder);
610
611 nv_encoder->dcb = entry;
612 nv_encoder->or = ffs(entry->or) - 1;
613
614 drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
615 drm_encoder_helper_add(encoder, helper);
616
617 encoder->possible_crtcs = entry->heads;
618 encoder->possible_clones = 0;
619
620 return 0;
621}
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
new file mode 100644
index 000000000000..b47c757ff48b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -0,0 +1,288 @@
1/*
2 * Copyright 2009 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Author: Ben Skeggs
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "drm_crtc_helper.h"
28
29#include "nouveau_drv.h"
30#include "nouveau_fb.h"
31#include "nouveau_hw.h"
32#include "nouveau_encoder.h"
33#include "nouveau_connector.h"
34
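/* non-zero iff more than one encoder type bit is set: e & (e - 1)
 * clears the lowest set bit, so only multi-bit masks survive */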
35#define MULTIPLE_ENCODERS(e) (e & (e - 1))
36
37static void
38nv04_display_store_initial_head_owner(struct drm_device *dev)
39{
40 struct drm_nouveau_private *dev_priv = dev->dev_private;
41
42 if (dev_priv->chipset != 0x11) {
43 dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
44 goto ownerknown;
45 }
46
47 /* reading CR44 is broken on nv11, so we attempt to infer it */
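/* the heuristic below prefers a head with a slaved non-TV (i.e.
 * flat panel) output, then any slaved head, then defaults to head A */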
48 if (nvReadMC(dev, NV_PBUS_DEBUG_1) & (1 << 28)) /* heads tied, restore both */
49 dev_priv->crtc_owner = 0x4;
50 else {
51 uint8_t slaved_on_A, slaved_on_B;
52 bool tvA = false;
53 bool tvB = false;
54
55 NVLockVgaCrtcs(dev, false);
56
57 slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
58 0x80;
59 if (slaved_on_B)
60 tvB = !(NVReadVgaCrtc(dev, 1, NV_CIO_CRE_LCD__INDEX) &
61 MASK(NV_CIO_CRE_LCD_LCD_SELECT));
62
63 slaved_on_A = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_PIXEL_INDEX) &
64 0x80;
65 if (slaved_on_A)
66 tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
67 MASK(NV_CIO_CRE_LCD_LCD_SELECT));
68
69 NVLockVgaCrtcs(dev, true);
70
71 if (slaved_on_A && !tvA)
72 dev_priv->crtc_owner = 0x0;
73 else if (slaved_on_B && !tvB)
74 dev_priv->crtc_owner = 0x3;
75 else if (slaved_on_A)
76 dev_priv->crtc_owner = 0x0;
77 else if (slaved_on_B)
78 dev_priv->crtc_owner = 0x3;
79 else
80 dev_priv->crtc_owner = 0x0;
81 }
82
83ownerknown:
84 NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
85
86 /* we need to ensure the heads are not tied henceforth, or reading any
87 * 8 bit reg on head B will fail;
88 * setting a single arbitrary head solves that */
89 NVSetOwner(dev, 0);
90}
91
92int
93nv04_display_create(struct drm_device *dev)
94{
95 struct drm_nouveau_private *dev_priv = dev->dev_private;
96 struct parsed_dcb *dcb = dev_priv->vbios->dcb;
97 struct drm_encoder *encoder;
98 struct drm_crtc *crtc;
99 uint16_t connector[16] = { 0 };
100 int i, ret;
101
102 NV_DEBUG(dev, "\n");
103
104 if (nv_two_heads(dev))
105 nv04_display_store_initial_head_owner(dev);
106
107 drm_mode_config_init(dev);
108 drm_mode_create_scaling_mode_property(dev);
109 drm_mode_create_dithering_property(dev);
110
111 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
112
113 dev->mode_config.min_width = 0;
114 dev->mode_config.min_height = 0;
115 switch (dev_priv->card_type) {
116 case NV_04:
117 dev->mode_config.max_width = 2048;
118 dev->mode_config.max_height = 2048;
119 break;
120 default:
121 dev->mode_config.max_width = 4096;
122 dev->mode_config.max_height = 4096;
123 break;
124 }
125
126 dev->mode_config.fb_base = dev_priv->fb_phys;
127
128 nv04_crtc_create(dev, 0);
129 if (nv_two_heads(dev))
130 nv04_crtc_create(dev, 1);
131
132 for (i = 0; i < dcb->entries; i++) {
133 struct dcb_entry *dcbent = &dcb->entry[i];
134
135 switch (dcbent->type) {
136 case OUTPUT_ANALOG:
137 ret = nv04_dac_create(dev, dcbent);
138 break;
139 case OUTPUT_LVDS:
140 case OUTPUT_TMDS:
141 ret = nv04_dfp_create(dev, dcbent);
142 break;
143 case OUTPUT_TV:
144 if (dcbent->location == DCB_LOC_ON_CHIP)
145 ret = nv17_tv_create(dev, dcbent);
146 else
147 ret = nv04_tv_create(dev, dcbent);
148 break;
149 default:
150 NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
151 continue;
152 }
153
154 if (ret)
155 continue;
156
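/* accumulate, per DCB connector index, a bitmask of the encoder
 * types created on it; used below to pick the connector type
 * (e.g. analog + tmds on one connector means DVI-I) */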
157 connector[dcbent->connector] |= (1 << dcbent->type);
158 }
159
160 for (i = 0; i < dcb->entries; i++) {
161 struct dcb_entry *dcbent = &dcb->entry[i];
162 uint16_t encoders;
163 int type;
164
165 encoders = connector[dcbent->connector];
166 if (!(encoders & (1 << dcbent->type)))
167 continue;
168 connector[dcbent->connector] = 0;
169
170 switch (dcbent->type) {
171 case OUTPUT_ANALOG:
172 if (!MULTIPLE_ENCODERS(encoders))
173 type = DRM_MODE_CONNECTOR_VGA;
174 else
175 type = DRM_MODE_CONNECTOR_DVII;
176 break;
177 case OUTPUT_TMDS:
178 if (!MULTIPLE_ENCODERS(encoders))
179 type = DRM_MODE_CONNECTOR_DVID;
180 else
181 type = DRM_MODE_CONNECTOR_DVII;
182 break;
183 case OUTPUT_LVDS:
184 type = DRM_MODE_CONNECTOR_LVDS;
185#if 0
186 /* don't create i2c adapter when lvds ddc not allowed */
187 if (dcbent->lvdsconf.use_straps_for_mode ||
188 dev_priv->vbios->fp_no_ddc)
189 i2c_index = 0xf;
190#endif
191 break;
192 case OUTPUT_TV:
193 type = DRM_MODE_CONNECTOR_TV;
194 break;
195 default:
196 type = DRM_MODE_CONNECTOR_Unknown;
197 continue;
198 }
199
200 nouveau_connector_create(dev, dcbent->connector, type);
201 }
202
203 /* Save previous state */
204 NVLockVgaCrtcs(dev, false);
205
206 nouveau_hw_save_vga_fonts(dev, 1);
207
208 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
209 crtc->funcs->save(crtc);
210
211 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
212 struct drm_encoder_helper_funcs *func = encoder->helper_private;
213
214 func->save(encoder);
215 }
216
217 return 0;
218}
219
220void
221nv04_display_destroy(struct drm_device *dev)
222{
223 struct drm_encoder *encoder;
224 struct drm_crtc *crtc;
225
226 NV_DEBUG(dev, "\n");
227
228 /* Turn every CRTC off. */
229 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
230 struct drm_mode_set modeset = {
231 .crtc = crtc,
232 };
233
234 crtc->funcs->set_config(&modeset);
235 }
236
237 /* Restore state */
238 NVLockVgaCrtcs(dev, false);
239
240 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
241 struct drm_encoder_helper_funcs *func = encoder->helper_private;
242
243 func->restore(encoder);
244 }
245
246 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
247 crtc->funcs->restore(crtc);
248
249 nouveau_hw_save_vga_fonts(dev, 0);
250
251 drm_mode_config_cleanup(dev);
252}
253
254void
255nv04_display_restore(struct drm_device *dev)
256{
257 struct drm_nouveau_private *dev_priv = dev->dev_private;
258 struct drm_encoder *encoder;
259 struct drm_crtc *crtc;
260
261 NVLockVgaCrtcs(dev, false);
262
263 /* meh.. modeset apparently doesn't set up all the regs and depends
264 * on pre-existing state; for now, load the state of the card *before*
265 * nouveau was loaded, and then do a modeset.
266 *
267 * the best thing to do is probably to make the save/restore routines
268 * more general, so they don't just save/restore "pre-load" state and
269 * can be used on suspend too.
270 */
271 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
272 struct drm_encoder_helper_funcs *func = encoder->helper_private;
273
274 func->restore(encoder);
275 }
276
277 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
278 crtc->funcs->restore(crtc);
279
280 if (nv_two_heads(dev)) {
281 NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
282 dev_priv->crtc_owner);
283 NVSetOwner(dev, dev_priv->crtc_owner);
284 }
285
286 NVLockVgaCrtcs(dev, true);
287}
288
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 000000000000..638cf601c427
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,21 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv04_fb_init(struct drm_device *dev)
8{
9 /* This is what the DDX did for NV_ARCH_04, but an mmio trace shows
10 * nvidia reading PFB_CFG_0, then writing back its original value
11 * (which was 0x701114 in this case).
12 */
13
14 nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
15 return 0;
16}
17
18void
19nv04_fb_takedown(struct drm_device *dev)
20{
21}
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
new file mode 100644
index 000000000000..09a31071ee58
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -0,0 +1,316 @@
1/*
2 * Copyright 2009 Ben Skeggs
3 * Copyright 2008 Stuart Bennett
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "nouveau_drv.h"
27#include "nouveau_dma.h"
28#include "nouveau_fbcon.h"
29
30static void
31nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
32{
33 struct nouveau_fbcon_par *par = info->par;
34 struct drm_device *dev = par->dev;
35 struct drm_nouveau_private *dev_priv = dev->dev_private;
36 struct nouveau_channel *chan = dev_priv->channel;
37
38 if (info->state != FBINFO_STATE_RUNNING)
39 return;
40
41 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
42 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
43 info->flags |= FBINFO_HWACCEL_DISABLED;
44 }
45
46 if (info->flags & FBINFO_HWACCEL_DISABLED) {
47 cfb_copyarea(info, region);
48 return;
49 }
50
51 BEGIN_RING(chan, NvSubImageBlit, 0x0300, 3);
52 OUT_RING(chan, (region->sy << 16) | region->sx);
53 OUT_RING(chan, (region->dy << 16) | region->dx);
54 OUT_RING(chan, (region->height << 16) | region->width);
55 FIRE_RING(chan);
56}
57
58static void
59nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
60{
61 struct nouveau_fbcon_par *par = info->par;
62 struct drm_device *dev = par->dev;
63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan = dev_priv->channel;
65 uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];
66
67 if (info->state != FBINFO_STATE_RUNNING)
68 return;
69
70 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
71 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
72 info->flags |= FBINFO_HWACCEL_DISABLED;
73 }
74
75 if (info->flags & FBINFO_HWACCEL_DISABLED) {
76 cfb_fillrect(info, rect);
77 return;
78 }
79
80 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
81 OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
82 BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
83 OUT_RING(chan, color);
84 BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
85 OUT_RING(chan, (rect->dx << 16) | rect->dy);
86 OUT_RING(chan, (rect->width << 16) | rect->height);
87 FIRE_RING(chan);
88}
89
90static void
91nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
92{
93 struct nouveau_fbcon_par *par = info->par;
94 struct drm_device *dev = par->dev;
95 struct drm_nouveau_private *dev_priv = dev->dev_private;
96 struct nouveau_channel *chan = dev_priv->channel;
97 uint32_t fg;
98 uint32_t bg;
99 uint32_t dsize;
100 uint32_t width;
101 uint32_t *data = (uint32_t *)image->data;
102
103 if (info->state != FBINFO_STATE_RUNNING)
104 return;
105
106 if (image->depth != 1) {
107 cfb_imageblit(info, image);
108 return;
109 }
110
111 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
112 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
113 info->flags |= FBINFO_HWACCEL_DISABLED;
114 }
115
116 if (info->flags & FBINFO_HWACCEL_DISABLED) {
117 cfb_imageblit(info, image);
118 return;
119 }
120
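/* the 1bpp glyph bitmap is padded to a 32-pixel multiple per
 * scanline; dsize is then the payload size in 32-bit words
 * (32 pixels per word at 1bpp) */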
121 width = (image->width + 31) & ~31;
122 dsize = (width * image->height) >> 5;
123
124 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
125 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
126 fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
127 bg = ((uint32_t *) info->pseudo_palette)[image->bg_color];
128 } else {
129 fg = image->fg_color;
130 bg = image->bg_color;
131 }
132
133 BEGIN_RING(chan, NvSubGdiRect, 0x0be4, 7);
134 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
135 OUT_RING(chan, ((image->dy + image->height) << 16) |
136 ((image->dx + image->width) & 0xffff));
137 OUT_RING(chan, bg);
138 OUT_RING(chan, fg);
139 OUT_RING(chan, (image->height << 16) | image->width);
140 OUT_RING(chan, (image->height << 16) | width);
141 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
142
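/* feed the bitmap through the 0x0c00 data window in chunks of at
 * most 128 dwords, which appears to be the width of that method
 * range */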
143 while (dsize) {
144 int iter_len = dsize > 128 ? 128 : dsize;
145
146 if (RING_SPACE(chan, iter_len + 1)) {
147 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
148 info->flags |= FBINFO_HWACCEL_DISABLED;
149 cfb_imageblit(info, image);
150 return;
151 }
152
153 BEGIN_RING(chan, NvSubGdiRect, 0x0c00, iter_len);
154 OUT_RINGp(chan, data, iter_len);
155 data += iter_len;
156 dsize -= iter_len;
157 }
158
159 FIRE_RING(chan);
160}
161
162static int
163nv04_fbcon_grobj_new(struct drm_device *dev, int class, uint32_t handle)
164{
165 struct drm_nouveau_private *dev_priv = dev->dev_private;
166 struct nouveau_gpuobj *obj = NULL;
167 int ret;
168
169 ret = nouveau_gpuobj_gr_new(dev_priv->channel, class, &obj);
170 if (ret)
171 return ret;
172
173 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, handle, obj, NULL);
174 if (ret)
175 return ret;
176
177 return 0;
178}
179
180int
181nv04_fbcon_accel_init(struct fb_info *info)
182{
183 struct nouveau_fbcon_par *par = info->par;
184 struct drm_device *dev = par->dev;
185 struct drm_nouveau_private *dev_priv = dev->dev_private;
186 struct nouveau_channel *chan = dev_priv->channel;
187 int surface_fmt, pattern_fmt, rect_fmt;
188 int ret;
189
190 switch (info->var.bits_per_pixel) {
191 case 8:
192 surface_fmt = 1;
193 pattern_fmt = 3;
194 rect_fmt = 3;
195 break;
196 case 16:
197 surface_fmt = 4;
198 pattern_fmt = 1;
199 rect_fmt = 1;
200 break;
201 case 32:
202 switch (info->var.transp.length) {
203 case 0: /* depth 24 */
204 case 8: /* depth 32 */
205 break;
206 default:
207 return -EINVAL;
208 }
209
210 surface_fmt = 6;
211 pattern_fmt = 3;
212 rect_fmt = 3;
213 break;
214 default:
215 return -EINVAL;
216 }
217
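/* instantiate the 2D engine objects: context surface (0x42, or
 * 0x62 on nv10+), clip rectangle (0x19), raster op (0x43), image
 * pattern (0x44), GDI rect text (0x4a) and image blit (0x5f, or
 * 0x9f on nv10+) */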
218 ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
219 0x0062 : 0x0042, NvCtxSurf2D);
220 if (ret)
221 return ret;
222
223 ret = nv04_fbcon_grobj_new(dev, 0x0019, NvClipRect);
224 if (ret)
225 return ret;
226
227 ret = nv04_fbcon_grobj_new(dev, 0x0043, NvRop);
228 if (ret)
229 return ret;
230
231 ret = nv04_fbcon_grobj_new(dev, 0x0044, NvImagePatt);
232 if (ret)
233 return ret;
234
235 ret = nv04_fbcon_grobj_new(dev, 0x004a, NvGdiRect);
236 if (ret)
237 return ret;
238
239 ret = nv04_fbcon_grobj_new(dev, dev_priv->card_type >= NV_10 ?
240 0x009f : 0x005f, NvImageBlit);
241 if (ret)
242 return ret;
243
244 if (RING_SPACE(chan, 49)) {
245 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
246 info->flags |= FBINFO_HWACCEL_DISABLED;
247 return 0;
248 }
249
250 BEGIN_RING(chan, 1, 0x0000, 1);
251 OUT_RING(chan, NvCtxSurf2D);
252 BEGIN_RING(chan, 1, 0x0184, 2);
253 OUT_RING(chan, NvDmaFB);
254 OUT_RING(chan, NvDmaFB);
255 BEGIN_RING(chan, 1, 0x0300, 4);
256 OUT_RING(chan, surface_fmt);
257 OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
258 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
259 OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
260
261 BEGIN_RING(chan, 1, 0x0000, 1);
262 OUT_RING(chan, NvRop);
263 BEGIN_RING(chan, 1, 0x0300, 1);
264 OUT_RING(chan, 0x55);
265
266 BEGIN_RING(chan, 1, 0x0000, 1);
267 OUT_RING(chan, NvImagePatt);
268 BEGIN_RING(chan, 1, 0x0300, 8);
269 OUT_RING(chan, pattern_fmt);
270#ifdef __BIG_ENDIAN
271 OUT_RING(chan, 2);
272#else
273 OUT_RING(chan, 1);
274#endif
275 OUT_RING(chan, 0);
276 OUT_RING(chan, 1);
277 OUT_RING(chan, ~0);
278 OUT_RING(chan, ~0);
279 OUT_RING(chan, ~0);
280 OUT_RING(chan, ~0);
281
282 BEGIN_RING(chan, 1, 0x0000, 1);
283 OUT_RING(chan, NvClipRect);
284 BEGIN_RING(chan, 1, 0x0300, 2);
285 OUT_RING(chan, 0);
286 OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);
287
288 BEGIN_RING(chan, NvSubImageBlit, 0x0000, 1);
289 OUT_RING(chan, NvImageBlit);
290 BEGIN_RING(chan, NvSubImageBlit, 0x019c, 1);
291 OUT_RING(chan, NvCtxSurf2D);
292 BEGIN_RING(chan, NvSubImageBlit, 0x02fc, 1);
293 OUT_RING(chan, 3);
294
295 BEGIN_RING(chan, NvSubGdiRect, 0x0000, 1);
296 OUT_RING(chan, NvGdiRect);
297 BEGIN_RING(chan, NvSubGdiRect, 0x0198, 1);
298 OUT_RING(chan, NvCtxSurf2D);
299 BEGIN_RING(chan, NvSubGdiRect, 0x0188, 2);
300 OUT_RING(chan, NvImagePatt);
301 OUT_RING(chan, NvRop);
302 BEGIN_RING(chan, NvSubGdiRect, 0x0304, 1);
303 OUT_RING(chan, 1);
304 BEGIN_RING(chan, NvSubGdiRect, 0x0300, 1);
305 OUT_RING(chan, rect_fmt);
306 BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
307 OUT_RING(chan, 3);
308
309 FIRE_RING(chan);
310
311 info->fbops->fb_fillrect = nv04_fbcon_fillrect;
312 info->fbops->fb_copyarea = nv04_fbcon_copyarea;
313 info->fbops->fb_imageblit = nv04_fbcon_imageblit;
314 return 0;
315}
316
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 000000000000..0c3cd53c7313
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,271 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
32#define NV04_RAMFC__SIZE 32
33#define NV04_RAMFC_DMA_PUT 0x00
34#define NV04_RAMFC_DMA_GET 0x04
35#define NV04_RAMFC_DMA_INSTANCE 0x08
36#define NV04_RAMFC_DMA_STATE 0x0C
37#define NV04_RAMFC_DMA_FETCH 0x10
38#define NV04_RAMFC_ENGINE 0x14
39#define NV04_RAMFC_PULL1_ENGINE 0x18
40
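/* RAMFC_WR/RAMFC_RD paste the field name onto NV04_RAMFC_ to get
 * its byte offset, then access that 32-bit word of the channel's
 * RAMFC; e.g. RAMFC_WR(DMA_PUT, x) expands to a write of x at
 * word 0 (offset 0x00) */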
41#define RAMFC_WR(offset, val) nv_wo32(dev, chan->ramfc->gpuobj, \
42 NV04_RAMFC_##offset/4, (val))
43#define RAMFC_RD(offset) nv_ro32(dev, chan->ramfc->gpuobj, \
44 NV04_RAMFC_##offset/4)
45
46void
47nv04_fifo_disable(struct drm_device *dev)
48{
49 uint32_t tmp;
50
51 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
52 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
53 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
54 tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
55 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
56}
57
58void
59nv04_fifo_enable(struct drm_device *dev)
60{
61 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
62 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
63}
64
65bool
66nv04_fifo_reassign(struct drm_device *dev, bool enable)
67{
68 uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
69
70 nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
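/* report whether reassignment was previously enabled so the
 * caller can restore it */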
71 return (reassign == 1);
72}
73
74int
75nv04_fifo_channel_id(struct drm_device *dev)
76{
77 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
78 NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
79}
80
81int
82nv04_fifo_create_context(struct nouveau_channel *chan)
83{
84 struct drm_device *dev = chan->dev;
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86 int ret;
87
88 ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
89 NV04_RAMFC__SIZE,
90 NVOBJ_FLAG_ZERO_ALLOC |
91 NVOBJ_FLAG_ZERO_FREE,
92 NULL, &chan->ramfc);
93 if (ret)
94 return ret;
95
96 /* Set up initial state */
97 dev_priv->engine.instmem.prepare_access(dev, true);
98 RAMFC_WR(DMA_PUT, chan->pushbuf_base);
99 RAMFC_WR(DMA_GET, chan->pushbuf_base);
100 RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
101 RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
102 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
103 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
104#ifdef __BIG_ENDIAN
105 NV_PFIFO_CACHE1_BIG_ENDIAN |
106#endif
107 0));
108 dev_priv->engine.instmem.finish_access(dev);
109
110 /* enable the fifo dma operation */
111 nv_wr32(dev, NV04_PFIFO_MODE,
112 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
113 return 0;
114}
115
116void
117nv04_fifo_destroy_context(struct nouveau_channel *chan)
118{
119 struct drm_device *dev = chan->dev;
120
121 nv_wr32(dev, NV04_PFIFO_MODE,
122 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
123
124 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
125}
126
127static void
128nv04_fifo_do_load_context(struct drm_device *dev, int chid)
129{
130 struct drm_nouveau_private *dev_priv = dev->dev_private;
131 uint32_t fc = NV04_RAMFC(chid), tmp;
132
133 dev_priv->engine.instmem.prepare_access(dev, false);
134
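/* the fc + 0/4/8/... offsets follow the NV04_RAMFC_* layout
 * defined at the top of this file */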
135 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
136 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
137 tmp = nv_ri32(dev, fc + 8);
138 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
139 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
140 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
141 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
142 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
143 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
144
145 dev_priv->engine.instmem.finish_access(dev);
146
147 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
148 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
149}
150
151int
152nv04_fifo_load_context(struct nouveau_channel *chan)
153{
154 uint32_t tmp;
155
156 nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
157 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
158 nv04_fifo_do_load_context(chan->dev, chan->id);
159 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
160
161 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
162 tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
163 nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
164
165 return 0;
166}
167
168int
169nv04_fifo_unload_context(struct drm_device *dev)
170{
171 struct drm_nouveau_private *dev_priv = dev->dev_private;
172 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
173 struct nouveau_channel *chan = NULL;
174 uint32_t tmp;
175 int chid;
176
177 chid = pfifo->channel_id(dev);
178 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
179 return 0;
180
181 chan = dev_priv->fifos[chid];
182 if (!chan) {
183 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
184 return -EINVAL;
185 }
186
187 dev_priv->engine.instmem.prepare_access(dev, true);
188 RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
189 RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
190 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
191 tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
192 RAMFC_WR(DMA_INSTANCE, tmp);
193 RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
194 RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
195 RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
196 RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
197 dev_priv->engine.instmem.finish_access(dev);
198
199 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
200 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
201 return 0;
202}
203
204static void
205nv04_fifo_init_reset(struct drm_device *dev)
206{
207 nv_wr32(dev, NV03_PMC_ENABLE,
208 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
209 nv_wr32(dev, NV03_PMC_ENABLE,
210 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
211
212 nv_wr32(dev, 0x003224, 0x000f0078);
213 nv_wr32(dev, 0x002044, 0x0101ffff);
214 nv_wr32(dev, 0x002040, 0x000000ff);
215 nv_wr32(dev, 0x002500, 0x00000000);
216 nv_wr32(dev, 0x003000, 0x00000000);
217 nv_wr32(dev, 0x003050, 0x00000000);
218 nv_wr32(dev, 0x003200, 0x00000000);
219 nv_wr32(dev, 0x003250, 0x00000000);
220 nv_wr32(dev, 0x003220, 0x00000000);
221
222 nv_wr32(dev, 0x003250, 0x00000000);
223 nv_wr32(dev, 0x003270, 0x00000000);
224 nv_wr32(dev, 0x003210, 0x00000000);
225}
226
227static void
228nv04_fifo_init_ramxx(struct drm_device *dev)
229{
230 struct drm_nouveau_private *dev_priv = dev->dev_private;
231
232 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
233 ((dev_priv->ramht_bits - 9) << 16) |
234 (dev_priv->ramht_offset >> 8));
235 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
236 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
237}
238
239static void
240nv04_fifo_init_intr(struct drm_device *dev)
241{
242 nv_wr32(dev, 0x002100, 0xffffffff);
243 nv_wr32(dev, 0x002140, 0xffffffff);
244}
245
246int
247nv04_fifo_init(struct drm_device *dev)
248{
249 struct drm_nouveau_private *dev_priv = dev->dev_private;
250 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
251 int i;
252
253 nv04_fifo_init_reset(dev);
254 nv04_fifo_init_ramxx(dev);
255
256 nv04_fifo_do_load_context(dev, pfifo->channels - 1);
257 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
258
259 nv04_fifo_init_intr(dev);
260 pfifo->enable(dev);
261
262 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
263 if (dev_priv->fifos[i]) {
264 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
265 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
266 }
267 }
268
269 return 0;
270}
271
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 000000000000..396ee92118f6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,579 @@
1/*
2 * Copyright 2007 Stephane Marchesin
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29
30static uint32_t nv04_graph_ctx_regs[] = {
31 NV04_PGRAPH_CTX_SWITCH1,
32 NV04_PGRAPH_CTX_SWITCH2,
33 NV04_PGRAPH_CTX_SWITCH3,
34 NV04_PGRAPH_CTX_SWITCH4,
35 NV04_PGRAPH_CTX_CACHE1,
36 NV04_PGRAPH_CTX_CACHE2,
37 NV04_PGRAPH_CTX_CACHE3,
38 NV04_PGRAPH_CTX_CACHE4,
39 0x00400184,
40 0x004001a4,
41 0x004001c4,
42 0x004001e4,
43 0x00400188,
44 0x004001a8,
45 0x004001c8,
46 0x004001e8,
47 0x0040018c,
48 0x004001ac,
49 0x004001cc,
50 0x004001ec,
51 0x00400190,
52 0x004001b0,
53 0x004001d0,
54 0x004001f0,
55 0x00400194,
56 0x004001b4,
57 0x004001d4,
58 0x004001f4,
59 0x00400198,
60 0x004001b8,
61 0x004001d8,
62 0x004001f8,
63 0x0040019c,
64 0x004001bc,
65 0x004001dc,
66 0x004001fc,
67 0x00400174,
68 NV04_PGRAPH_DMA_START_0,
69 NV04_PGRAPH_DMA_START_1,
70 NV04_PGRAPH_DMA_LENGTH,
71 NV04_PGRAPH_DMA_MISC,
72 NV04_PGRAPH_DMA_PITCH,
73 NV04_PGRAPH_BOFFSET0,
74 NV04_PGRAPH_BBASE0,
75 NV04_PGRAPH_BLIMIT0,
76 NV04_PGRAPH_BOFFSET1,
77 NV04_PGRAPH_BBASE1,
78 NV04_PGRAPH_BLIMIT1,
79 NV04_PGRAPH_BOFFSET2,
80 NV04_PGRAPH_BBASE2,
81 NV04_PGRAPH_BLIMIT2,
82 NV04_PGRAPH_BOFFSET3,
83 NV04_PGRAPH_BBASE3,
84 NV04_PGRAPH_BLIMIT3,
85 NV04_PGRAPH_BOFFSET4,
86 NV04_PGRAPH_BBASE4,
87 NV04_PGRAPH_BLIMIT4,
88 NV04_PGRAPH_BOFFSET5,
89 NV04_PGRAPH_BBASE5,
90 NV04_PGRAPH_BLIMIT5,
91 NV04_PGRAPH_BPITCH0,
92 NV04_PGRAPH_BPITCH1,
93 NV04_PGRAPH_BPITCH2,
94 NV04_PGRAPH_BPITCH3,
95 NV04_PGRAPH_BPITCH4,
96 NV04_PGRAPH_SURFACE,
97 NV04_PGRAPH_STATE,
98 NV04_PGRAPH_BSWIZZLE2,
99 NV04_PGRAPH_BSWIZZLE5,
100 NV04_PGRAPH_BPIXEL,
101 NV04_PGRAPH_NOTIFY,
102 NV04_PGRAPH_PATT_COLOR0,
103 NV04_PGRAPH_PATT_COLOR1,
104 NV04_PGRAPH_PATT_COLORRAM+0x00,
105 NV04_PGRAPH_PATT_COLORRAM+0x01,
106 NV04_PGRAPH_PATT_COLORRAM+0x02,
107 NV04_PGRAPH_PATT_COLORRAM+0x03,
108 NV04_PGRAPH_PATT_COLORRAM+0x04,
109 NV04_PGRAPH_PATT_COLORRAM+0x05,
110 NV04_PGRAPH_PATT_COLORRAM+0x06,
111 NV04_PGRAPH_PATT_COLORRAM+0x07,
112 NV04_PGRAPH_PATT_COLORRAM+0x08,
113 NV04_PGRAPH_PATT_COLORRAM+0x09,
114 NV04_PGRAPH_PATT_COLORRAM+0x0A,
115 NV04_PGRAPH_PATT_COLORRAM+0x0B,
116 NV04_PGRAPH_PATT_COLORRAM+0x0C,
117 NV04_PGRAPH_PATT_COLORRAM+0x0D,
118 NV04_PGRAPH_PATT_COLORRAM+0x0E,
119 NV04_PGRAPH_PATT_COLORRAM+0x0F,
120 NV04_PGRAPH_PATT_COLORRAM+0x10,
121 NV04_PGRAPH_PATT_COLORRAM+0x11,
122 NV04_PGRAPH_PATT_COLORRAM+0x12,
123 NV04_PGRAPH_PATT_COLORRAM+0x13,
124 NV04_PGRAPH_PATT_COLORRAM+0x14,
125 NV04_PGRAPH_PATT_COLORRAM+0x15,
126 NV04_PGRAPH_PATT_COLORRAM+0x16,
127 NV04_PGRAPH_PATT_COLORRAM+0x17,
128 NV04_PGRAPH_PATT_COLORRAM+0x18,
129 NV04_PGRAPH_PATT_COLORRAM+0x19,
130 NV04_PGRAPH_PATT_COLORRAM+0x1A,
131 NV04_PGRAPH_PATT_COLORRAM+0x1B,
132 NV04_PGRAPH_PATT_COLORRAM+0x1C,
133 NV04_PGRAPH_PATT_COLORRAM+0x1D,
134 NV04_PGRAPH_PATT_COLORRAM+0x1E,
135 NV04_PGRAPH_PATT_COLORRAM+0x1F,
136 NV04_PGRAPH_PATT_COLORRAM+0x20,
137 NV04_PGRAPH_PATT_COLORRAM+0x21,
138 NV04_PGRAPH_PATT_COLORRAM+0x22,
139 NV04_PGRAPH_PATT_COLORRAM+0x23,
140 NV04_PGRAPH_PATT_COLORRAM+0x24,
141 NV04_PGRAPH_PATT_COLORRAM+0x25,
142 NV04_PGRAPH_PATT_COLORRAM+0x26,
143 NV04_PGRAPH_PATT_COLORRAM+0x27,
144 NV04_PGRAPH_PATT_COLORRAM+0x28,
145 NV04_PGRAPH_PATT_COLORRAM+0x29,
146 NV04_PGRAPH_PATT_COLORRAM+0x2A,
147 NV04_PGRAPH_PATT_COLORRAM+0x2B,
148 NV04_PGRAPH_PATT_COLORRAM+0x2C,
149 NV04_PGRAPH_PATT_COLORRAM+0x2D,
150 NV04_PGRAPH_PATT_COLORRAM+0x2E,
151 NV04_PGRAPH_PATT_COLORRAM+0x2F,
152 NV04_PGRAPH_PATT_COLORRAM+0x30,
153 NV04_PGRAPH_PATT_COLORRAM+0x31,
154 NV04_PGRAPH_PATT_COLORRAM+0x32,
155 NV04_PGRAPH_PATT_COLORRAM+0x33,
156 NV04_PGRAPH_PATT_COLORRAM+0x34,
157 NV04_PGRAPH_PATT_COLORRAM+0x35,
158 NV04_PGRAPH_PATT_COLORRAM+0x36,
159 NV04_PGRAPH_PATT_COLORRAM+0x37,
160 NV04_PGRAPH_PATT_COLORRAM+0x38,
161 NV04_PGRAPH_PATT_COLORRAM+0x39,
162 NV04_PGRAPH_PATT_COLORRAM+0x3A,
163 NV04_PGRAPH_PATT_COLORRAM+0x3B,
164 NV04_PGRAPH_PATT_COLORRAM+0x3C,
165 NV04_PGRAPH_PATT_COLORRAM+0x3D,
166 NV04_PGRAPH_PATT_COLORRAM+0x3E,
167 NV04_PGRAPH_PATT_COLORRAM+0x3F,
168 NV04_PGRAPH_PATTERN,
169 0x0040080c,
170 NV04_PGRAPH_PATTERN_SHAPE,
171 0x00400600,
172 NV04_PGRAPH_ROP3,
173 NV04_PGRAPH_CHROMA,
174 NV04_PGRAPH_BETA_AND,
175 NV04_PGRAPH_BETA_PREMULT,
176 NV04_PGRAPH_CONTROL0,
177 NV04_PGRAPH_CONTROL1,
178 NV04_PGRAPH_CONTROL2,
179 NV04_PGRAPH_BLEND,
180 NV04_PGRAPH_STORED_FMT,
181 NV04_PGRAPH_SOURCE_COLOR,
182 0x00400560,
183 0x00400568,
184 0x00400564,
185 0x0040056c,
186 0x00400400,
187 0x00400480,
188 0x00400404,
189 0x00400484,
190 0x00400408,
191 0x00400488,
192 0x0040040c,
193 0x0040048c,
194 0x00400410,
195 0x00400490,
196 0x00400414,
197 0x00400494,
198 0x00400418,
199 0x00400498,
200 0x0040041c,
201 0x0040049c,
202 0x00400420,
203 0x004004a0,
204 0x00400424,
205 0x004004a4,
206 0x00400428,
207 0x004004a8,
208 0x0040042c,
209 0x004004ac,
210 0x00400430,
211 0x004004b0,
212 0x00400434,
213 0x004004b4,
214 0x00400438,
215 0x004004b8,
216 0x0040043c,
217 0x004004bc,
218 0x00400440,
219 0x004004c0,
220 0x00400444,
221 0x004004c4,
222 0x00400448,
223 0x004004c8,
224 0x0040044c,
225 0x004004cc,
226 0x00400450,
227 0x004004d0,
228 0x00400454,
229 0x004004d4,
230 0x00400458,
231 0x004004d8,
232 0x0040045c,
233 0x004004dc,
234 0x00400460,
235 0x004004e0,
236 0x00400464,
237 0x004004e4,
238 0x00400468,
239 0x004004e8,
240 0x0040046c,
241 0x004004ec,
242 0x00400470,
243 0x004004f0,
244 0x00400474,
245 0x004004f4,
246 0x00400478,
247 0x004004f8,
248 0x0040047c,
249 0x004004fc,
250 0x0040053c,
251 0x00400544,
252 0x00400540,
253 0x00400548,
254 0x00400560,
255 0x00400568,
256 0x00400564,
257 0x0040056c,
258 0x00400534,
259 0x00400538,
260 0x00400514,
261 0x00400518,
262 0x0040051c,
263 0x00400520,
264 0x00400524,
265 0x00400528,
266 0x0040052c,
267 0x00400530,
268 0x00400d00,
269 0x00400d40,
270 0x00400d80,
271 0x00400d04,
272 0x00400d44,
273 0x00400d84,
274 0x00400d08,
275 0x00400d48,
276 0x00400d88,
277 0x00400d0c,
278 0x00400d4c,
279 0x00400d8c,
280 0x00400d10,
281 0x00400d50,
282 0x00400d90,
283 0x00400d14,
284 0x00400d54,
285 0x00400d94,
286 0x00400d18,
287 0x00400d58,
288 0x00400d98,
289 0x00400d1c,
290 0x00400d5c,
291 0x00400d9c,
292 0x00400d20,
293 0x00400d60,
294 0x00400da0,
295 0x00400d24,
296 0x00400d64,
297 0x00400da4,
298 0x00400d28,
299 0x00400d68,
300 0x00400da8,
301 0x00400d2c,
302 0x00400d6c,
303 0x00400dac,
304 0x00400d30,
305 0x00400d70,
306 0x00400db0,
307 0x00400d34,
308 0x00400d74,
309 0x00400db4,
310 0x00400d38,
311 0x00400d78,
312 0x00400db8,
313 0x00400d3c,
314 0x00400d7c,
315 0x00400dbc,
316 0x00400590,
317 0x00400594,
318 0x00400598,
319 0x0040059c,
320 0x004005a8,
321 0x004005ac,
322 0x004005b0,
323 0x004005b4,
324 0x004005c0,
325 0x004005c4,
326 0x004005c8,
327 0x004005cc,
328 0x004005d0,
329 0x004005d4,
330 0x004005d8,
331 0x004005dc,
332 0x004005e0,
333 NV04_PGRAPH_PASSTHRU_0,
334 NV04_PGRAPH_PASSTHRU_1,
335 NV04_PGRAPH_PASSTHRU_2,
336 NV04_PGRAPH_DVD_COLORFMT,
337 NV04_PGRAPH_SCALED_FORMAT,
338 NV04_PGRAPH_MISC24_0,
339 NV04_PGRAPH_MISC24_1,
340 NV04_PGRAPH_MISC24_2,
341 0x00400500,
342 0x00400504,
343 NV04_PGRAPH_VALID1,
344 NV04_PGRAPH_VALID2
345};
348
349struct graph_state {
350 int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
351};
352
353struct nouveau_channel *
354nv04_graph_channel(struct drm_device *dev)
355{
356 struct drm_nouveau_private *dev_priv = dev->dev_private;
357 int chid = dev_priv->engine.fifo.channels;
358
359 if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
360 chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
361
362 if (chid >= dev_priv->engine.fifo.channels)
363 return NULL;
364
365 return dev_priv->fifos[chid];
366}
367
368void
369nv04_graph_context_switch(struct drm_device *dev)
370{
371 struct drm_nouveau_private *dev_priv = dev->dev_private;
372 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
373 struct nouveau_channel *chan = NULL;
374 int chid;
375
376 pgraph->fifo_access(dev, false);
377 nouveau_wait_for_idle(dev);
378
379 /* If previous context is valid, we need to save it */
380 pgraph->unload_context(dev);
381
382 /* Load context for next channel */
383 chid = dev_priv->engine.fifo.channel_id(dev);
384 chan = dev_priv->fifos[chid];
385 if (chan)
386 nv04_graph_load_context(chan);
387
388 pgraph->fifo_access(dev, true);
389}
390
391int nv04_graph_create_context(struct nouveau_channel *chan)
392{
393 struct graph_state *pgraph_ctx;
394	NV_DEBUG(chan->dev, "nv04_graph_create_context %d\n", chan->id);
395
396 chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
397 GFP_KERNEL);
398 if (pgraph_ctx == NULL)
399 return -ENOMEM;
400
401 /* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
402 pgraph_ctx->nv04[0] = 0x0001ffff;
403	/* is this really needed? */
404#if 0
405 dev_priv->fifos[channel].pgraph_ctx[1] =
406 nv_rd32(dev, NV_PGRAPH_DEBUG_4);
407 dev_priv->fifos[channel].pgraph_ctx[2] =
408 nv_rd32(dev, 0x004006b0);
409#endif
410 return 0;
411}
412
413void nv04_graph_destroy_context(struct nouveau_channel *chan)
414{
415 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
416
417 kfree(pgraph_ctx);
418 chan->pgraph_ctx = NULL;
419}
420
421int nv04_graph_load_context(struct nouveau_channel *chan)
422{
423 struct drm_device *dev = chan->dev;
424 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
425 uint32_t tmp;
426 int i;
427
428 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
429 nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
430
431 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
432 nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
433 tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
434 nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
435 return 0;
436}
437
438int
439nv04_graph_unload_context(struct drm_device *dev)
440{
441 struct drm_nouveau_private *dev_priv = dev->dev_private;
442 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
443 struct nouveau_channel *chan = NULL;
444 struct graph_state *ctx;
445 uint32_t tmp;
446 int i;
447
448 chan = pgraph->channel(dev);
449 if (!chan)
450 return 0;
451 ctx = chan->pgraph_ctx;
452
453 for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
454 ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
455
456 nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
457 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
458 tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
459 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
460 return 0;
461}
462
463int nv04_graph_init(struct drm_device *dev)
464{
465 struct drm_nouveau_private *dev_priv = dev->dev_private;
466 uint32_t tmp;
467
468 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
469 ~NV_PMC_ENABLE_PGRAPH);
470 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
471 NV_PMC_ENABLE_PGRAPH);
472
473 /* Enable PGRAPH interrupts */
474 nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
475 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
476
477 nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
478 nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
479 /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
480 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
481 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
482	/* 0x1231c000 blob, 001 haiku */
483	/* V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100); */
484	nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
485	/* 0x72111100 blob, 01 haiku */
486	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870); */
487	nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
488	/* haiku same */
489
490	/* nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31); */
491	nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
492	/* haiku and blob 10d4 */
493
494	nv_wr32(dev, NV04_PGRAPH_STATE, 0xFFFFFFFF);
495	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000100);
496 tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
497 tmp |= dev_priv->engine.fifo.channels << 24;
498 nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
499
500 /* These don't belong here, they're part of a per-channel context */
501 nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
502	nv_wr32(dev, NV04_PGRAPH_BETA_AND, 0xFFFFFFFF);
503
504 return 0;
505}
506
507void nv04_graph_takedown(struct drm_device *dev)
508{
509}
510
511void
512nv04_graph_fifo_access(struct drm_device *dev, bool enabled)
513{
514 if (enabled)
515 nv_wr32(dev, NV04_PGRAPH_FIFO,
516 nv_rd32(dev, NV04_PGRAPH_FIFO) | 1);
517 else
518 nv_wr32(dev, NV04_PGRAPH_FIFO,
519 nv_rd32(dev, NV04_PGRAPH_FIFO) & ~1);
520}
521
522static int
523nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
524 int mthd, uint32_t data)
525{
526 chan->fence.last_sequence_irq = data;
527 nouveau_fence_handler(chan->dev, chan->id);
528 return 0;
529}
530
531static int
532nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
533 int mthd, uint32_t data)
534{
535 struct drm_device *dev = chan->dev;
536 uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
537 int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
538 uint32_t tmp;
539
540 tmp = nv_ri32(dev, instance);
541 tmp &= ~0x00038000;
542 tmp |= ((data & 7) << 15);
543
544 nv_wi32(dev, instance, tmp);
545 nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
546 nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
547 return 0;
548}
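/* Editorial worked example for the masking above: 0x00038000 covers
 * bits 15..17, so an operation value of, say, 3 is stored as
 * (3 << 15) == 0x00018000 in the instance dword and mirrored into the
 * CTX_SWITCH1/CTX_CACHE1 copies.
 */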
549
550static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
551 { 0x0150, nv04_graph_mthd_set_ref },
552 {}
553};
554
555static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
556 { 0x02fc, nv04_graph_mthd_set_operation },
557 {},
558};
559
560struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
561 { 0x0039, false, nv04_graph_mthds_m2mf },
562 { 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
563 { 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
564 { 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
565 { 0x0077, false, nv04_graph_mthds_set_operation }, /* sifm */
566 { 0x0030, false, NULL }, /* null */
567 { 0x0042, false, NULL }, /* surf2d */
568 { 0x0043, false, NULL }, /* rop */
569 { 0x0012, false, NULL }, /* beta1 */
570 { 0x0072, false, NULL }, /* beta4 */
571 { 0x0019, false, NULL }, /* cliprect */
572 { 0x0044, false, NULL }, /* pattern */
573 { 0x0052, false, NULL }, /* swzsurf */
574 { 0x0053, false, NULL }, /* surf3d */
575 { 0x0054, false, NULL }, /* tex_tri */
576 { 0x0055, false, NULL }, /* multitex_tri */
577 {}
578};
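/* Editorial sketch (not part of this patch): the tables above imply a
 * two-level dispatch -- match the object class, then the method offset.
 * The struct field names below are assumptions made for illustration;
 * the real lookup lives in the shared nouveau object/IRQ code.
 */
#if 0
static int
dispatch_graph_mthd(struct nouveau_channel *chan, int grclass,
		    int mthd, uint32_t data)
{
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_pgraph_object_method *m;

	for (grc = nv04_graph_grclass; grc->id; grc++) {
		if (grc->id != grclass)
			continue;
		for (m = grc->methods; m && m->mthd; m++) {
			if (m->mthd == mthd)
				return m->handler(chan, grclass, mthd, data);
		}
		break;
	}
	return -ENOENT; /* not handled in software */
}
#endif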
579
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 000000000000..a20c206625a2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,208 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4
5/* returns the size of a fifo context, in bytes */
6static int
7nouveau_fifo_ctx_size(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10
11 if (dev_priv->chipset >= 0x40)
12 return 128;
13
14	if (dev_priv->chipset >= 0x17)
15 return 64;
16
17 return 32;
18}
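/* Editorial sketch: the size above is the stride of the per-channel
 * RAMFC entries, so a channel's entry address works out as below.
 * Illustrative helper only -- the driver open-codes this, e.g. via the
 * NV10_RAMFC() macro in nv10_fifo.c.
 */
#if 0
static uint32_t
ramfc_entry_addr(struct drm_device *dev, int chid)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	return dev_priv->ramfc_offset + chid * nouveau_fifo_ctx_size(dev);
}
#endif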
19
20static void
21nv04_instmem_determine_amount(struct drm_device *dev)
22{
23 struct drm_nouveau_private *dev_priv = dev->dev_private;
24 int i;
25
26 /* Figure out how much instance memory we need */
27 if (dev_priv->card_type >= NV_40) {
28 /* We'll want more instance memory than this on some NV4x cards.
29 * There's a 16MB aperture to play with that maps onto the end
30 * of vram. For now, only reserve a small piece until we know
31 * more about what each chipset requires.
32 */
33 switch (dev_priv->chipset & 0xf0) {
34 case 0x40:
35 case 0x47:
36 case 0x49:
37 case 0x4b:
38 dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
39 break;
40 default:
41 dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
42 break;
43 }
44 } else {
45 /*XXX: what *are* the limits on <NV40 cards?
46 */
47 dev_priv->ramin_rsvd_vram = (512 * 1024);
48 }
49 NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
50
51 /* Clear all of it, except the BIOS image that's in the first 64KiB */
52 dev_priv->engine.instmem.prepare_access(dev, true);
53 for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
54 nv_wi32(dev, i, 0x00000000);
55 dev_priv->engine.instmem.finish_access(dev);
56}
57
58static void
59nv04_instmem_configure_fixed_tables(struct drm_device *dev)
60{
61 struct drm_nouveau_private *dev_priv = dev->dev_private;
62 struct nouveau_engine *engine = &dev_priv->engine;
63
64 /* FIFO hash table (RAMHT)
65 * use 4k hash table at RAMIN+0x10000
66 * TODO: extend the hash table
67 */
68 dev_priv->ramht_offset = 0x10000;
69 dev_priv->ramht_bits = 9;
70 dev_priv->ramht_size = (1 << dev_priv->ramht_bits); /* nr entries */
71 dev_priv->ramht_size *= 8; /* 2 32-bit values per entry in RAMHT */
72 NV_DEBUG(dev, "RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
73 dev_priv->ramht_size);
74
75	/* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
76 dev_priv->ramro_offset = 0x11200;
77 dev_priv->ramro_size = 512;
78 NV_DEBUG(dev, "RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
79 dev_priv->ramro_size);
80
81 /* FIFO context table (RAMFC)
82	 * NV40: Not sure exactly how to position RAMFC on some cards;
83	 * 0x30002 seems to position it at RAMIN+0x20000 on these
84	 * cards. RAMFC is 4KiB (32 fifos, 128-byte entries).
85	 * Others: position RAMFC at RAMIN+0x11400.
86 */
87 dev_priv->ramfc_size = engine->fifo.channels *
88 nouveau_fifo_ctx_size(dev);
89 switch (dev_priv->card_type) {
90 case NV_40:
91 dev_priv->ramfc_offset = 0x20000;
92 break;
93 case NV_30:
94 case NV_20:
95 case NV_10:
96 case NV_04:
97 default:
98 dev_priv->ramfc_offset = 0x11400;
99 break;
100 }
101 NV_DEBUG(dev, "RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
102 dev_priv->ramfc_size);
103}
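/* Editorial worked example of the RAMHT sizing above: ramht_bits = 9
 * gives 1 << 9 = 512 entries at 8 bytes (two 32-bit words) each, i.e.
 * 4096 bytes -- the "4k hash table" at RAMIN+0x10000, ending at
 * 0x11000, just below RAMRO at 0x11200.
 */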
104
105int nv04_instmem_init(struct drm_device *dev)
106{
107 struct drm_nouveau_private *dev_priv = dev->dev_private;
108 uint32_t offset;
109 int ret = 0;
110
111 nv04_instmem_determine_amount(dev);
112 nv04_instmem_configure_fixed_tables(dev);
113
114 /* Create a heap to manage RAMIN allocations, we don't allocate
115 * the space that was reserved for RAMHT/FC/RO.
116 */
117 offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
118
119 /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
120 * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
121	 * ("new style" control) the upper 16 bits of 0x2220 point at this
122	 * other mysterious table that's clobbering important things.
123	 *
124	 * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
125	 * smashed to pieces on us, so reserve 0x30000-0x40000 too.
126 */
127 if (dev_priv->card_type >= NV_40) {
128 if (offset < 0x40000)
129 offset = 0x40000;
130 }
131
132 ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
133 offset, dev_priv->ramin_rsvd_vram - offset);
134 if (ret) {
135 dev_priv->ramin_heap = NULL;
136 NV_ERROR(dev, "Failed to init RAMIN heap\n");
137 }
138
139 return ret;
140}
141
142void
143nv04_instmem_takedown(struct drm_device *dev)
144{
145}
146
147int
148nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
149{
150 if (gpuobj->im_backing)
151 return -EINVAL;
152
153 return 0;
154}
155
156void
157nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
158{
159 struct drm_nouveau_private *dev_priv = dev->dev_private;
160
161 if (gpuobj && gpuobj->im_backing) {
162 if (gpuobj->im_bound)
163 dev_priv->engine.instmem.unbind(dev, gpuobj);
164 gpuobj->im_backing = NULL;
165 }
166}
167
168int
169nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
170{
171 if (!gpuobj->im_pramin || gpuobj->im_bound)
172 return -EINVAL;
173
174 gpuobj->im_bound = 1;
175 return 0;
176}
177
178int
179nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
180{
181 if (gpuobj->im_bound == 0)
182 return -EINVAL;
183
184 gpuobj->im_bound = 0;
185 return 0;
186}
187
188void
189nv04_instmem_prepare_access(struct drm_device *dev, bool write)
190{
191}
192
193void
194nv04_instmem_finish_access(struct drm_device *dev)
195{
196}
197
198int
199nv04_instmem_suspend(struct drm_device *dev)
200{
201 return 0;
202}
203
204void
205nv04_instmem_resume(struct drm_device *dev)
206{
207}
208
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 000000000000..617ed1e05269
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,20 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv04_mc_init(struct drm_device *dev)
8{
9 /* Power up everything, resetting each individual unit will
10 * be done later if needed.
11 */
12
13 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
14 return 0;
15}
16
17void
18nv04_mc_takedown(struct drm_device *dev)
19{
20}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 000000000000..1d09ddd57399
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,51 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv04_timer_init(struct drm_device *dev)
8{
9 nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
10 nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
11
12	/* Just use the pre-existing values when possible for now; these regs
13	 * are not written by the nv X driver (its author missed a /4 on the
14	 * address), and writing 8 and 3 to the correct regs breaks the timings
15	 * on the LVDS hardware sequencing microcode.
16	 * A correct solution (involving calculations with the GPU PLL) can
17	 * be done when kernel modesetting lands.
18	 */
19 if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
20 !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
21 nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
22 nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
23 }
24
25 return 0;
26}
27
28uint64_t
29nv04_timer_read(struct drm_device *dev)
30{
31 uint32_t low;
32 /* From kmmio dumps on nv28 this looks like how the blob does this.
33 * It reads the high dword twice, before and after.
34 * The only explanation seems to be that the 64-bit timer counter
35 * advances between high and low dword reads and may corrupt the
36 * result. Not confirmed.
37 */
38 uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
39 uint32_t high1;
40 do {
41 high1 = high2;
42 low = nv_rd32(dev, NV04_PTIMER_TIME_0);
43 high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
44 } while (high1 != high2);
45 return (((uint64_t)high2) << 32) | (uint64_t)low;
46}
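/* Editorial sketch: the monotonic value returned above is what a
 * bounded register poll would be built on.  Illustrative only; the
 * timeout is in PTIMER ticks (nominally nanoseconds once the
 * numerator/denominator are calibrated), and the helper name is made
 * up for this example.
 */
#if 0
static bool
wait_reg_eq(struct drm_device *dev, uint32_t reg, uint32_t mask,
	    uint32_t val, uint64_t timeout)
{
	uint64_t start = nv04_timer_read(dev);

	while ((nv_rd32(dev, reg) & mask) != val) {
		if (nv04_timer_read(dev) - start > timeout)
			return false;
	}
	return true;
}
#endif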
47
48void
49nv04_timer_takedown(struct drm_device *dev)
50{
51}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
new file mode 100644
index 000000000000..9c63099e9c42
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -0,0 +1,305 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_encoder.h"
30#include "nouveau_connector.h"
31#include "nouveau_crtc.h"
32#include "nouveau_hw.h"
33#include "drm_crtc_helper.h"
34
35#include "i2c/ch7006.h"
36
37static struct {
38 struct i2c_board_info board_info;
39 struct drm_encoder_funcs funcs;
40 struct drm_encoder_helper_funcs hfuncs;
41 void *params;
42
43} nv04_tv_encoder_info[] = {
44 {
45 .board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
46 .params = &(struct ch7006_encoder_params) {
47 CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
48 0, 0, 0,
49 CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
50 CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
51 },
52 },
53};
54
55static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
56{
57 struct i2c_msg msg = {
58 .addr = addr,
59 .len = 0,
60 };
61
62 return i2c_transfer(adapter, &msg, 1) == 1;
63}
64
65int nv04_tv_identify(struct drm_device *dev, int i2c_index)
66{
67 struct nouveau_i2c_chan *i2c;
68 bool was_locked;
69	int i;
70
71 NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
72
73 i2c = nouveau_i2c_find(dev, i2c_index);
74 if (!i2c)
75 return -ENODEV;
76
77 was_locked = NVLockVgaCrtcs(dev, false);
78
79 for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
80 if (probe_i2c_addr(&i2c->adapter,
81 nv04_tv_encoder_info[i].board_info.addr)) {
82	break;
84 }
85 }
86
87 if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
88 NV_TRACE(dev, "Detected TV encoder: %s\n",
89 nv04_tv_encoder_info[i].board_info.type);
90
91 } else {
92 NV_TRACE(dev, "No TV encoders found.\n");
93 i = -ENODEV;
94 }
95
96 NVLockVgaCrtcs(dev, was_locked);
97 return i;
98}
99
100#define PLLSEL_TV_CRTC1_MASK \
101 (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
102 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
103#define PLLSEL_TV_CRTC2_MASK \
104 (NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 \
105 | NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2)
106
107static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
108{
109 struct drm_device *dev = encoder->dev;
110 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
111 struct drm_nouveau_private *dev_priv = dev->dev_private;
112 struct nv04_mode_state *state = &dev_priv->mode_reg;
113 uint8_t crtc1A;
114
115 NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
116 mode, nv_encoder->dcb->index);
117
118 state->pllsel &= ~(PLLSEL_TV_CRTC1_MASK | PLLSEL_TV_CRTC2_MASK);
119
120 if (mode == DRM_MODE_DPMS_ON) {
121 int head = nouveau_crtc(encoder->crtc)->index;
122 crtc1A = NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX);
123
124 state->pllsel |= head ? PLLSEL_TV_CRTC2_MASK :
125 PLLSEL_TV_CRTC1_MASK;
126
127 /* Inhibit hsync */
128 crtc1A |= 0x80;
129
130 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX, crtc1A);
131 }
132
133 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
134
135 to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
136}
137
138static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
139{
140 struct drm_nouveau_private *dev_priv = dev->dev_private;
141 struct nv04_crtc_reg *state = &dev_priv->mode_reg.crtc_reg[head];
142
143 state->tv_setup = 0;
144
145 if (bind) {
146 state->CRTC[NV_CIO_CRE_LCD__INDEX] = 0;
147 state->CRTC[NV_CIO_CRE_49] |= 0x10;
148 } else {
149 state->CRTC[NV_CIO_CRE_49] &= ~0x10;
150 }
151
152 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_LCD__INDEX,
153 state->CRTC[NV_CIO_CRE_LCD__INDEX]);
154 NVWriteVgaCrtc(dev, head, NV_CIO_CRE_49,
155 state->CRTC[NV_CIO_CRE_49]);
156 NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP,
157 state->tv_setup);
158}
159
160static void nv04_tv_prepare(struct drm_encoder *encoder)
161{
162 struct drm_device *dev = encoder->dev;
163 int head = nouveau_crtc(encoder->crtc)->index;
164 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
165
166 helper->dpms(encoder, DRM_MODE_DPMS_OFF);
167
168 nv04_dfp_disable(dev, head);
169
170 if (nv_two_heads(dev))
171 nv04_tv_bind(dev, head ^ 1, false);
172
173 nv04_tv_bind(dev, head, true);
174}
175
176static void nv04_tv_mode_set(struct drm_encoder *encoder,
177 struct drm_display_mode *mode,
178 struct drm_display_mode *adjusted_mode)
179{
180 struct drm_device *dev = encoder->dev;
181 struct drm_nouveau_private *dev_priv = dev->dev_private;
182 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
183 struct nv04_crtc_reg *regp = &dev_priv->mode_reg.crtc_reg[nv_crtc->index];
184
185 regp->tv_htotal = adjusted_mode->htotal;
186 regp->tv_vtotal = adjusted_mode->vtotal;
187
188 /* These delay the TV signals with respect to the VGA port,
189 * they might be useful if we ever allow a CRTC to drive
190 * multiple outputs.
191 */
192 regp->tv_hskew = 1;
193 regp->tv_hsync_delay = 1;
194 regp->tv_hsync_delay2 = 64;
195 regp->tv_vskew = 1;
196 regp->tv_vsync_delay = 1;
197
198 to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
199}
200
201static void nv04_tv_commit(struct drm_encoder *encoder)
202{
203 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
204 struct drm_device *dev = encoder->dev;
205 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
206 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
207
208 helper->dpms(encoder, DRM_MODE_DPMS_ON);
209
210 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
211 drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index,
212 '@' + ffs(nv_encoder->dcb->or));
213}
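/* Editorial worked example for the log line above, assuming dcb->or is
 * a one-hot output mask: ffs(1) == 1, so '@' + 1 == 'A'; likewise
 * ffs(2) gives 'B' and ffs(4) gives 'C'.
 */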
214
215static void nv04_tv_destroy(struct drm_encoder *encoder)
216{
217 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
218
219 to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
220
221 drm_encoder_cleanup(encoder);
222
223 kfree(nv_encoder);
224}
225
226int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
227{
228 struct nouveau_encoder *nv_encoder;
229 struct drm_encoder *encoder;
230 struct drm_nouveau_private *dev_priv = dev->dev_private;
231 struct i2c_adapter *adap;
232 struct drm_encoder_funcs *funcs = NULL;
233 struct drm_encoder_helper_funcs *hfuncs = NULL;
234 struct drm_encoder_slave_funcs *sfuncs = NULL;
235 int i2c_index = entry->i2c_index;
236 int type, ret;
237 bool was_locked;
238
239 /* Ensure that we can talk to this encoder */
240 type = nv04_tv_identify(dev, i2c_index);
241 if (type < 0)
242 return type;
243
244 /* Allocate the necessary memory */
245 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
246 if (!nv_encoder)
247 return -ENOMEM;
248
249 /* Initialize the common members */
250 encoder = to_drm_encoder(nv_encoder);
251
252 funcs = &nv04_tv_encoder_info[type].funcs;
253 hfuncs = &nv04_tv_encoder_info[type].hfuncs;
254
255 drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
256 drm_encoder_helper_add(encoder, hfuncs);
257
258 encoder->possible_crtcs = entry->heads;
259 encoder->possible_clones = 0;
260
261 nv_encoder->dcb = entry;
262 nv_encoder->or = ffs(entry->or) - 1;
263
264 /* Run the slave-specific initialization */
265 adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
266
267 was_locked = NVLockVgaCrtcs(dev, false);
268
269 ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
270 &nv04_tv_encoder_info[type].board_info);
271
272 NVLockVgaCrtcs(dev, was_locked);
273
274 if (ret < 0)
275 goto fail;
276
277 /* Fill the function pointers */
278 sfuncs = to_encoder_slave(encoder)->slave_funcs;
279
280 *funcs = (struct drm_encoder_funcs) {
281 .destroy = nv04_tv_destroy,
282 };
283
284 *hfuncs = (struct drm_encoder_helper_funcs) {
285 .dpms = nv04_tv_dpms,
286 .save = sfuncs->save,
287 .restore = sfuncs->restore,
288 .mode_fixup = sfuncs->mode_fixup,
289 .prepare = nv04_tv_prepare,
290 .commit = nv04_tv_commit,
291 .mode_set = nv04_tv_mode_set,
292 .detect = sfuncs->detect,
293 };
294
295 /* Set the slave encoder configuration */
296 sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
297
298 return 0;
299
300fail:
301 drm_encoder_cleanup(encoder);
302
303 kfree(nv_encoder);
304 return ret;
305}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 000000000000..79e2d104d70a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,24 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv10_fb_init(struct drm_device *dev)
8{
9 uint32_t fb_bar_size;
10 int i;
11
12 fb_bar_size = drm_get_resource_len(dev, 0) - 1;
13 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
14 nv_wr32(dev, NV10_PFB_TILE(i), 0);
15 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
16 }
17
18 return 0;
19}
20
21void
22nv10_fb_takedown(struct drm_device *dev)
23{
24}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 000000000000..7aeabf262bc0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,260 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
32#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
33
34int
35nv10_fifo_channel_id(struct drm_device *dev)
36{
37 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
38 NV10_PFIFO_CACHE1_PUSH1_CHID_MASK;
39}
40
41int
42nv10_fifo_create_context(struct nouveau_channel *chan)
43{
44 struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
45 struct drm_device *dev = chan->dev;
46 uint32_t fc = NV10_RAMFC(chan->id);
47 int ret;
48
49 ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
50 NV10_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
51 NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
52 if (ret)
53 return ret;
54
55	/* Fill the entries that are seen filled in dumps of the nvidia
56	 * driver just after a channel is put into DMA mode.
57	 */
58 dev_priv->engine.instmem.prepare_access(dev, true);
59 nv_wi32(dev, fc + 0, chan->pushbuf_base);
60 nv_wi32(dev, fc + 4, chan->pushbuf_base);
61 nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
62 nv_wi32(dev, fc + 20, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
63 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
64 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
65#ifdef __BIG_ENDIAN
66 NV_PFIFO_CACHE1_BIG_ENDIAN |
67#endif
68 0);
69 dev_priv->engine.instmem.finish_access(dev);
70
71 /* enable the fifo dma operation */
72 nv_wr32(dev, NV04_PFIFO_MODE,
73 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
74 return 0;
75}
76
77void
78nv10_fifo_destroy_context(struct nouveau_channel *chan)
79{
80 struct drm_device *dev = chan->dev;
81
82 nv_wr32(dev, NV04_PFIFO_MODE,
83 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
84
85 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
86}
87
88static void
89nv10_fifo_do_load_context(struct drm_device *dev, int chid)
90{
91 struct drm_nouveau_private *dev_priv = dev->dev_private;
92 uint32_t fc = NV10_RAMFC(chid), tmp;
93
94 dev_priv->engine.instmem.prepare_access(dev, false);
95
96 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
97 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
98 nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
99
100 tmp = nv_ri32(dev, fc + 12);
101 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
102 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
103
104 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 16));
105 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 20));
106 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 24));
107 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 28));
108
109 if (dev_priv->chipset < 0x17)
110 goto out;
111
112 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 32));
113 tmp = nv_ri32(dev, fc + 36);
114 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
115 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 40));
116 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 44));
117 nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
118
119out:
120 dev_priv->engine.instmem.finish_access(dev);
121
122 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
123 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
124}
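/* Editorial sketch of the RAMFC layout implied by the load/unload code
 * around here (byte offsets within one channel's entry):
 *
 *   0x00 DMA_PUT         0x04 DMA_GET         0x08 REF_CNT
 *   0x0c DMA_INSTANCE (low 16) / DMA_DCOUNT (high 16)
 *   0x10 DMA_STATE       0x14 DMA_FETCH       0x18 ENGINE
 *   0x1c PULL1
 *   and on >=nv17 (64-byte entries):
 *   0x20 ACQUIRE_VALUE   0x24 ACQUIRE_TIMESTAMP
 *   0x28 ACQUIRE_TIMEOUT 0x2c SEMAPHORE       0x30 DMA_SUBROUTINE
 */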
125
126int
127nv10_fifo_load_context(struct nouveau_channel *chan)
128{
129 struct drm_device *dev = chan->dev;
130 uint32_t tmp;
131
132 nv10_fifo_do_load_context(dev, chan->id);
133
134 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
135 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
136 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
137
138 /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
139 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
140 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
141
142 return 0;
143}
144
145int
146nv10_fifo_unload_context(struct drm_device *dev)
147{
148 struct drm_nouveau_private *dev_priv = dev->dev_private;
149 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
150 uint32_t fc, tmp;
151 int chid;
152
153 chid = pfifo->channel_id(dev);
154 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
155 return 0;
156 fc = NV10_RAMFC(chid);
157
158 dev_priv->engine.instmem.prepare_access(dev, true);
159
160 nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
161 nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
162 nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
163 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
164 tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
165 nv_wi32(dev, fc + 12, tmp);
166 nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
167 nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
168 nv_wi32(dev, fc + 24, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
169 nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
170
171 if (dev_priv->chipset < 0x17)
172 goto out;
173
174 nv_wi32(dev, fc + 32, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
175 tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
176 nv_wi32(dev, fc + 36, tmp);
177 nv_wi32(dev, fc + 40, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
178 nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
179 nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
180
181out:
182 dev_priv->engine.instmem.finish_access(dev);
183
184 nv10_fifo_do_load_context(dev, pfifo->channels - 1);
185 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
186 return 0;
187}
188
189static void
190nv10_fifo_init_reset(struct drm_device *dev)
191{
192 nv_wr32(dev, NV03_PMC_ENABLE,
193 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
194 nv_wr32(dev, NV03_PMC_ENABLE,
195 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
196
197 nv_wr32(dev, 0x003224, 0x000f0078);
198 nv_wr32(dev, 0x002044, 0x0101ffff);
199 nv_wr32(dev, 0x002040, 0x000000ff);
200 nv_wr32(dev, 0x002500, 0x00000000);
201 nv_wr32(dev, 0x003000, 0x00000000);
202 nv_wr32(dev, 0x003050, 0x00000000);
203
204 nv_wr32(dev, 0x003258, 0x00000000);
205 nv_wr32(dev, 0x003210, 0x00000000);
206 nv_wr32(dev, 0x003270, 0x00000000);
207}
208
209static void
210nv10_fifo_init_ramxx(struct drm_device *dev)
211{
212 struct drm_nouveau_private *dev_priv = dev->dev_private;
213
214 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
215 ((dev_priv->ramht_bits - 9) << 16) |
216 (dev_priv->ramht_offset >> 8));
217	nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);
218
219 if (dev_priv->chipset < 0x17) {
220 nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
221 } else {
222 nv_wr32(dev, NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
223				      (1 << 16) /* 64-byte entries */);
224		/* XXX the nvidia blob sets bits 18, 21 and 23 for nv20 & nv30 */
225 }
226}
227
228static void
229nv10_fifo_init_intr(struct drm_device *dev)
230{
231 nv_wr32(dev, 0x002100, 0xffffffff);
232 nv_wr32(dev, 0x002140, 0xffffffff);
233}
234
235int
236nv10_fifo_init(struct drm_device *dev)
237{
238 struct drm_nouveau_private *dev_priv = dev->dev_private;
239 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
240 int i;
241
242 nv10_fifo_init_reset(dev);
243 nv10_fifo_init_ramxx(dev);
244
245 nv10_fifo_do_load_context(dev, pfifo->channels - 1);
246 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
247
248 nv10_fifo_init_intr(dev);
249 pfifo->enable(dev);
250 pfifo->reassign(dev, true);
251
252 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
253 if (dev_priv->fifos[i]) {
254 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
255 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
256 }
257 }
258
259 return 0;
260}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 000000000000..6bf6804bb0ef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,892 @@
1/*
2 * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "nouveau_drm.h"
28#include "nouveau_drv.h"
29
30#define NV10_FIFO_NUMBER 32
31
32struct pipe_state {
33 uint32_t pipe_0x0000[0x040/4];
34 uint32_t pipe_0x0040[0x010/4];
35 uint32_t pipe_0x0200[0x0c0/4];
36 uint32_t pipe_0x4400[0x080/4];
37 uint32_t pipe_0x6400[0x3b0/4];
38 uint32_t pipe_0x6800[0x2f0/4];
39 uint32_t pipe_0x6c00[0x030/4];
40 uint32_t pipe_0x7000[0x130/4];
41 uint32_t pipe_0x7400[0x0c0/4];
42 uint32_t pipe_0x7800[0x0c0/4];
43};
44
45static int nv10_graph_ctx_regs[] = {
46 NV10_PGRAPH_CTX_SWITCH1,
47 NV10_PGRAPH_CTX_SWITCH2,
48 NV10_PGRAPH_CTX_SWITCH3,
49 NV10_PGRAPH_CTX_SWITCH4,
50 NV10_PGRAPH_CTX_SWITCH5,
51 NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */
52 NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */
53 NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */
54 NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */
55 NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */
56 0x00400164,
57 0x00400184,
58 0x004001a4,
59 0x004001c4,
60 0x004001e4,
61 0x00400168,
62 0x00400188,
63 0x004001a8,
64 0x004001c8,
65 0x004001e8,
66 0x0040016c,
67 0x0040018c,
68 0x004001ac,
69 0x004001cc,
70 0x004001ec,
71 0x00400170,
72 0x00400190,
73 0x004001b0,
74 0x004001d0,
75 0x004001f0,
76 0x00400174,
77 0x00400194,
78 0x004001b4,
79 0x004001d4,
80 0x004001f4,
81 0x00400178,
82 0x00400198,
83 0x004001b8,
84 0x004001d8,
85 0x004001f8,
86 0x0040017c,
87 0x0040019c,
88 0x004001bc,
89 0x004001dc,
90 0x004001fc,
91 NV10_PGRAPH_CTX_USER,
92 NV04_PGRAPH_DMA_START_0,
93 NV04_PGRAPH_DMA_START_1,
94 NV04_PGRAPH_DMA_LENGTH,
95 NV04_PGRAPH_DMA_MISC,
96 NV10_PGRAPH_DMA_PITCH,
97 NV04_PGRAPH_BOFFSET0,
98 NV04_PGRAPH_BBASE0,
99 NV04_PGRAPH_BLIMIT0,
100 NV04_PGRAPH_BOFFSET1,
101 NV04_PGRAPH_BBASE1,
102 NV04_PGRAPH_BLIMIT1,
103 NV04_PGRAPH_BOFFSET2,
104 NV04_PGRAPH_BBASE2,
105 NV04_PGRAPH_BLIMIT2,
106 NV04_PGRAPH_BOFFSET3,
107 NV04_PGRAPH_BBASE3,
108 NV04_PGRAPH_BLIMIT3,
109 NV04_PGRAPH_BOFFSET4,
110 NV04_PGRAPH_BBASE4,
111 NV04_PGRAPH_BLIMIT4,
112 NV04_PGRAPH_BOFFSET5,
113 NV04_PGRAPH_BBASE5,
114 NV04_PGRAPH_BLIMIT5,
115 NV04_PGRAPH_BPITCH0,
116 NV04_PGRAPH_BPITCH1,
117 NV04_PGRAPH_BPITCH2,
118 NV04_PGRAPH_BPITCH3,
119 NV04_PGRAPH_BPITCH4,
120 NV10_PGRAPH_SURFACE,
121 NV10_PGRAPH_STATE,
122 NV04_PGRAPH_BSWIZZLE2,
123 NV04_PGRAPH_BSWIZZLE5,
124 NV04_PGRAPH_BPIXEL,
125 NV10_PGRAPH_NOTIFY,
126 NV04_PGRAPH_PATT_COLOR0,
127 NV04_PGRAPH_PATT_COLOR1,
128 NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
129 0x00400904,
130 0x00400908,
131 0x0040090c,
132 0x00400910,
133 0x00400914,
134 0x00400918,
135 0x0040091c,
136 0x00400920,
137 0x00400924,
138 0x00400928,
139 0x0040092c,
140 0x00400930,
141 0x00400934,
142 0x00400938,
143 0x0040093c,
144 0x00400940,
145 0x00400944,
146 0x00400948,
147 0x0040094c,
148 0x00400950,
149 0x00400954,
150 0x00400958,
151 0x0040095c,
152 0x00400960,
153 0x00400964,
154 0x00400968,
155 0x0040096c,
156 0x00400970,
157 0x00400974,
158 0x00400978,
159 0x0040097c,
160 0x00400980,
161 0x00400984,
162 0x00400988,
163 0x0040098c,
164 0x00400990,
165 0x00400994,
166 0x00400998,
167 0x0040099c,
168 0x004009a0,
169 0x004009a4,
170 0x004009a8,
171 0x004009ac,
172 0x004009b0,
173 0x004009b4,
174 0x004009b8,
175 0x004009bc,
176 0x004009c0,
177 0x004009c4,
178 0x004009c8,
179 0x004009cc,
180 0x004009d0,
181 0x004009d4,
182 0x004009d8,
183 0x004009dc,
184 0x004009e0,
185 0x004009e4,
186 0x004009e8,
187 0x004009ec,
188 0x004009f0,
189 0x004009f4,
190 0x004009f8,
191 0x004009fc,
192 NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
193 0x0040080c,
194 NV04_PGRAPH_PATTERN_SHAPE,
195 NV03_PGRAPH_MONO_COLOR0,
196 NV04_PGRAPH_ROP3,
197 NV04_PGRAPH_CHROMA,
198 NV04_PGRAPH_BETA_AND,
199 NV04_PGRAPH_BETA_PREMULT,
200 0x00400e70,
201 0x00400e74,
202 0x00400e78,
203 0x00400e7c,
204 0x00400e80,
205 0x00400e84,
206 0x00400e88,
207 0x00400e8c,
208 0x00400ea0,
209 0x00400ea4,
210 0x00400ea8,
211 0x00400e90,
212 0x00400e94,
213 0x00400e98,
214 0x00400e9c,
215 NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
216 NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
217 0x00400f04,
218 0x00400f24,
219 0x00400f08,
220 0x00400f28,
221 0x00400f0c,
222 0x00400f2c,
223 0x00400f10,
224 0x00400f30,
225 0x00400f14,
226 0x00400f34,
227 0x00400f18,
228 0x00400f38,
229 0x00400f1c,
230 0x00400f3c,
231 NV10_PGRAPH_XFMODE0,
232 NV10_PGRAPH_XFMODE1,
233 NV10_PGRAPH_GLOBALSTATE0,
234 NV10_PGRAPH_GLOBALSTATE1,
235 NV04_PGRAPH_STORED_FMT,
236 NV04_PGRAPH_SOURCE_COLOR,
237 NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
238 NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
239 0x00400404,
240 0x00400484,
241 0x00400408,
242 0x00400488,
243 0x0040040c,
244 0x0040048c,
245 0x00400410,
246 0x00400490,
247 0x00400414,
248 0x00400494,
249 0x00400418,
250 0x00400498,
251 0x0040041c,
252 0x0040049c,
253 0x00400420,
254 0x004004a0,
255 0x00400424,
256 0x004004a4,
257 0x00400428,
258 0x004004a8,
259 0x0040042c,
260 0x004004ac,
261 0x00400430,
262 0x004004b0,
263 0x00400434,
264 0x004004b4,
265 0x00400438,
266 0x004004b8,
267 0x0040043c,
268 0x004004bc,
269 0x00400440,
270 0x004004c0,
271 0x00400444,
272 0x004004c4,
273 0x00400448,
274 0x004004c8,
275 0x0040044c,
276 0x004004cc,
277 0x00400450,
278 0x004004d0,
279 0x00400454,
280 0x004004d4,
281 0x00400458,
282 0x004004d8,
283 0x0040045c,
284 0x004004dc,
285 0x00400460,
286 0x004004e0,
287 0x00400464,
288 0x004004e4,
289 0x00400468,
290 0x004004e8,
291 0x0040046c,
292 0x004004ec,
293 0x00400470,
294 0x004004f0,
295 0x00400474,
296 0x004004f4,
297 0x00400478,
298 0x004004f8,
299 0x0040047c,
300 0x004004fc,
301 NV03_PGRAPH_ABS_UCLIP_XMIN,
302 NV03_PGRAPH_ABS_UCLIP_XMAX,
303 NV03_PGRAPH_ABS_UCLIP_YMIN,
304 NV03_PGRAPH_ABS_UCLIP_YMAX,
305 0x00400550,
306 0x00400558,
307 0x00400554,
308 0x0040055c,
309 NV03_PGRAPH_ABS_UCLIPA_XMIN,
310 NV03_PGRAPH_ABS_UCLIPA_XMAX,
311 NV03_PGRAPH_ABS_UCLIPA_YMIN,
312 NV03_PGRAPH_ABS_UCLIPA_YMAX,
313 NV03_PGRAPH_ABS_ICLIP_XMAX,
314 NV03_PGRAPH_ABS_ICLIP_YMAX,
315 NV03_PGRAPH_XY_LOGIC_MISC0,
316 NV03_PGRAPH_XY_LOGIC_MISC1,
317 NV03_PGRAPH_XY_LOGIC_MISC2,
318 NV03_PGRAPH_XY_LOGIC_MISC3,
319 NV03_PGRAPH_CLIPX_0,
320 NV03_PGRAPH_CLIPX_1,
321 NV03_PGRAPH_CLIPY_0,
322 NV03_PGRAPH_CLIPY_1,
323 NV10_PGRAPH_COMBINER0_IN_ALPHA,
324 NV10_PGRAPH_COMBINER1_IN_ALPHA,
325 NV10_PGRAPH_COMBINER0_IN_RGB,
326 NV10_PGRAPH_COMBINER1_IN_RGB,
327 NV10_PGRAPH_COMBINER_COLOR0,
328 NV10_PGRAPH_COMBINER_COLOR1,
329 NV10_PGRAPH_COMBINER0_OUT_ALPHA,
330 NV10_PGRAPH_COMBINER1_OUT_ALPHA,
331 NV10_PGRAPH_COMBINER0_OUT_RGB,
332 NV10_PGRAPH_COMBINER1_OUT_RGB,
333 NV10_PGRAPH_COMBINER_FINAL0,
334 NV10_PGRAPH_COMBINER_FINAL1,
335 0x00400e00,
336 0x00400e04,
337 0x00400e08,
338 0x00400e0c,
339 0x00400e10,
340 0x00400e14,
341 0x00400e18,
342 0x00400e1c,
343 0x00400e20,
344 0x00400e24,
345 0x00400e28,
346 0x00400e2c,
347 0x00400e30,
348 0x00400e34,
349 0x00400e38,
350 0x00400e3c,
351 NV04_PGRAPH_PASSTHRU_0,
352 NV04_PGRAPH_PASSTHRU_1,
353 NV04_PGRAPH_PASSTHRU_2,
354 NV10_PGRAPH_DIMX_TEXTURE,
355 NV10_PGRAPH_WDIMX_TEXTURE,
356 NV10_PGRAPH_DVD_COLORFMT,
357 NV10_PGRAPH_SCALED_FORMAT,
358 NV04_PGRAPH_MISC24_0,
359 NV04_PGRAPH_MISC24_1,
360 NV04_PGRAPH_MISC24_2,
361 NV03_PGRAPH_X_MISC,
362 NV03_PGRAPH_Y_MISC,
363 NV04_PGRAPH_VALID1,
364 NV04_PGRAPH_VALID2,
365};
366
367static int nv17_graph_ctx_regs[] = {
368 NV10_PGRAPH_DEBUG_4,
369 0x004006b0,
370 0x00400eac,
371 0x00400eb0,
372 0x00400eb4,
373 0x00400eb8,
374 0x00400ebc,
375 0x00400ec0,
376 0x00400ec4,
377 0x00400ec8,
378 0x00400ecc,
379 0x00400ed0,
380 0x00400ed4,
381 0x00400ed8,
382 0x00400edc,
383 0x00400ee0,
384 0x00400a00,
385 0x00400a04,
386};
387
388struct graph_state {
389 int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
390 int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
391 struct pipe_state pipe_state;
392};
393
394static void nv10_graph_save_pipe(struct nouveau_channel *chan)
395{
396 struct drm_device *dev = chan->dev;
397 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
398 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
399 int i;
400#define PIPE_SAVE(addr) \
401 do { \
402 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
403 for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
404 fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
405 } while (0)
406
407 PIPE_SAVE(0x4400);
408 PIPE_SAVE(0x0200);
409 PIPE_SAVE(0x6400);
410 PIPE_SAVE(0x6800);
411 PIPE_SAVE(0x6c00);
412 PIPE_SAVE(0x7000);
413 PIPE_SAVE(0x7400);
414 PIPE_SAVE(0x7800);
415 PIPE_SAVE(0x0040);
416 PIPE_SAVE(0x0000);
417
418#undef PIPE_SAVE
419}
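/* Editorial sketch: PIPE_ADDRESS/PIPE_DATA behave as an index/data
 * register pair whose data port auto-advances, which is what the
 * PIPE_SAVE() macro above relies on.  A plain-function equivalent,
 * for illustration only:
 */
#if 0
static void
pipe_read_block(struct drm_device *dev, uint32_t addr,
		uint32_t *buf, int count)
{
	int i;

	nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr);
	for (i = 0; i < count; i++)
		buf[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA);
}
#endif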
420
421static void nv10_graph_load_pipe(struct nouveau_channel *chan)
422{
423 struct drm_device *dev = chan->dev;
424 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
425 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
426 int i;
427 uint32_t xfmode0, xfmode1;
428#define PIPE_RESTORE(addr) \
429 do { \
430 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
431 for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
432 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
433 } while (0)
434
435
436 nouveau_wait_for_idle(dev);
437 /* XXX check haiku comments */
438 xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
439 xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
440 nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
441 nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
442 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
443 for (i = 0; i < 4; i++)
444 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
445 for (i = 0; i < 4; i++)
446 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
447
448 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
449 for (i = 0; i < 3; i++)
450 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
451
452 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
453 for (i = 0; i < 3; i++)
454 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
455
456 nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
457 nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
458
459
460 PIPE_RESTORE(0x0200);
461 nouveau_wait_for_idle(dev);
462
463 /* restore XFMODE */
464 nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
465 nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
466 PIPE_RESTORE(0x6400);
467 PIPE_RESTORE(0x6800);
468 PIPE_RESTORE(0x6c00);
469 PIPE_RESTORE(0x7000);
470 PIPE_RESTORE(0x7400);
471 PIPE_RESTORE(0x7800);
472 PIPE_RESTORE(0x4400);
473 PIPE_RESTORE(0x0000);
474 PIPE_RESTORE(0x0040);
475 nouveau_wait_for_idle(dev);
476
477#undef PIPE_RESTORE
478}
479
480static void nv10_graph_create_pipe(struct nouveau_channel *chan)
481{
482 struct drm_device *dev = chan->dev;
483 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
484 struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
485 uint32_t *fifo_pipe_state_addr;
486 int i;
487#define PIPE_INIT(addr) \
488 do { \
489 fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
490 } while (0)
491#define PIPE_INIT_END(addr) \
492 do { \
493 uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
494 ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
495 if (fifo_pipe_state_addr != __end_addr) \
496 NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
497 addr, fifo_pipe_state_addr, __end_addr); \
498 } while (0)
499#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
500
501 PIPE_INIT(0x0200);
502 for (i = 0; i < 48; i++)
503 NV_WRITE_PIPE_INIT(0x00000000);
504 PIPE_INIT_END(0x0200);
505
506 PIPE_INIT(0x6400);
507 for (i = 0; i < 211; i++)
508 NV_WRITE_PIPE_INIT(0x00000000);
509 NV_WRITE_PIPE_INIT(0x3f800000);
510 NV_WRITE_PIPE_INIT(0x40000000);
511 NV_WRITE_PIPE_INIT(0x40000000);
512 NV_WRITE_PIPE_INIT(0x40000000);
513 NV_WRITE_PIPE_INIT(0x40000000);
514 NV_WRITE_PIPE_INIT(0x00000000);
515 NV_WRITE_PIPE_INIT(0x00000000);
516 NV_WRITE_PIPE_INIT(0x3f800000);
517 NV_WRITE_PIPE_INIT(0x00000000);
518 NV_WRITE_PIPE_INIT(0x3f000000);
519 NV_WRITE_PIPE_INIT(0x3f000000);
520 NV_WRITE_PIPE_INIT(0x00000000);
521 NV_WRITE_PIPE_INIT(0x00000000);
522 NV_WRITE_PIPE_INIT(0x00000000);
523 NV_WRITE_PIPE_INIT(0x00000000);
524 NV_WRITE_PIPE_INIT(0x3f800000);
525 NV_WRITE_PIPE_INIT(0x00000000);
526 NV_WRITE_PIPE_INIT(0x00000000);
527 NV_WRITE_PIPE_INIT(0x00000000);
528 NV_WRITE_PIPE_INIT(0x00000000);
529 NV_WRITE_PIPE_INIT(0x00000000);
530 NV_WRITE_PIPE_INIT(0x3f800000);
531 NV_WRITE_PIPE_INIT(0x3f800000);
532 NV_WRITE_PIPE_INIT(0x3f800000);
533 NV_WRITE_PIPE_INIT(0x3f800000);
534 PIPE_INIT_END(0x6400);
535
536 PIPE_INIT(0x6800);
537 for (i = 0; i < 162; i++)
538 NV_WRITE_PIPE_INIT(0x00000000);
539 NV_WRITE_PIPE_INIT(0x3f800000);
540 for (i = 0; i < 25; i++)
541 NV_WRITE_PIPE_INIT(0x00000000);
542 PIPE_INIT_END(0x6800);
543
544 PIPE_INIT(0x6c00);
545 NV_WRITE_PIPE_INIT(0x00000000);
546 NV_WRITE_PIPE_INIT(0x00000000);
547 NV_WRITE_PIPE_INIT(0x00000000);
548 NV_WRITE_PIPE_INIT(0x00000000);
549 NV_WRITE_PIPE_INIT(0xbf800000);
550 NV_WRITE_PIPE_INIT(0x00000000);
551 NV_WRITE_PIPE_INIT(0x00000000);
552 NV_WRITE_PIPE_INIT(0x00000000);
553 NV_WRITE_PIPE_INIT(0x00000000);
554 NV_WRITE_PIPE_INIT(0x00000000);
555 NV_WRITE_PIPE_INIT(0x00000000);
556 NV_WRITE_PIPE_INIT(0x00000000);
557 PIPE_INIT_END(0x6c00);
558
559 PIPE_INIT(0x7000);
560 NV_WRITE_PIPE_INIT(0x00000000);
561 NV_WRITE_PIPE_INIT(0x00000000);
562 NV_WRITE_PIPE_INIT(0x00000000);
563 NV_WRITE_PIPE_INIT(0x00000000);
564 NV_WRITE_PIPE_INIT(0x00000000);
565 NV_WRITE_PIPE_INIT(0x00000000);
566 NV_WRITE_PIPE_INIT(0x00000000);
567 NV_WRITE_PIPE_INIT(0x00000000);
568 NV_WRITE_PIPE_INIT(0x00000000);
569 NV_WRITE_PIPE_INIT(0x00000000);
570 NV_WRITE_PIPE_INIT(0x00000000);
571 NV_WRITE_PIPE_INIT(0x00000000);
572 NV_WRITE_PIPE_INIT(0x7149f2ca);
573 NV_WRITE_PIPE_INIT(0x00000000);
574 NV_WRITE_PIPE_INIT(0x00000000);
575 NV_WRITE_PIPE_INIT(0x00000000);
576 NV_WRITE_PIPE_INIT(0x7149f2ca);
577 NV_WRITE_PIPE_INIT(0x00000000);
578 NV_WRITE_PIPE_INIT(0x00000000);
579 NV_WRITE_PIPE_INIT(0x00000000);
580 NV_WRITE_PIPE_INIT(0x7149f2ca);
581 NV_WRITE_PIPE_INIT(0x00000000);
582 NV_WRITE_PIPE_INIT(0x00000000);
583 NV_WRITE_PIPE_INIT(0x00000000);
584 NV_WRITE_PIPE_INIT(0x7149f2ca);
585 NV_WRITE_PIPE_INIT(0x00000000);
586 NV_WRITE_PIPE_INIT(0x00000000);
587 NV_WRITE_PIPE_INIT(0x00000000);
588 NV_WRITE_PIPE_INIT(0x7149f2ca);
589 NV_WRITE_PIPE_INIT(0x00000000);
590 NV_WRITE_PIPE_INIT(0x00000000);
591 NV_WRITE_PIPE_INIT(0x00000000);
592 NV_WRITE_PIPE_INIT(0x7149f2ca);
593 NV_WRITE_PIPE_INIT(0x00000000);
594 NV_WRITE_PIPE_INIT(0x00000000);
595 NV_WRITE_PIPE_INIT(0x00000000);
596 NV_WRITE_PIPE_INIT(0x7149f2ca);
597 NV_WRITE_PIPE_INIT(0x00000000);
598 NV_WRITE_PIPE_INIT(0x00000000);
599 NV_WRITE_PIPE_INIT(0x00000000);
600 NV_WRITE_PIPE_INIT(0x7149f2ca);
601 for (i = 0; i < 35; i++)
602 NV_WRITE_PIPE_INIT(0x00000000);
603 PIPE_INIT_END(0x7000);
604
605 PIPE_INIT(0x7400);
606 for (i = 0; i < 48; i++)
607 NV_WRITE_PIPE_INIT(0x00000000);
608 PIPE_INIT_END(0x7400);
609
610 PIPE_INIT(0x7800);
611 for (i = 0; i < 48; i++)
612 NV_WRITE_PIPE_INIT(0x00000000);
613 PIPE_INIT_END(0x7800);
614
615 PIPE_INIT(0x4400);
616 for (i = 0; i < 32; i++)
617 NV_WRITE_PIPE_INIT(0x00000000);
618 PIPE_INIT_END(0x4400);
619
620 PIPE_INIT(0x0000);
621 for (i = 0; i < 16; i++)
622 NV_WRITE_PIPE_INIT(0x00000000);
623 PIPE_INIT_END(0x0000);
624
625 PIPE_INIT(0x0040);
626 for (i = 0; i < 4; i++)
627 NV_WRITE_PIPE_INIT(0x00000000);
628 PIPE_INIT_END(0x0040);
629
630#undef PIPE_INIT
631#undef PIPE_INIT_END
632#undef NV_WRITE_PIPE_INIT
633}
634
635static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
636{
637 int i;
638 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
639 if (nv10_graph_ctx_regs[i] == reg)
640 return i;
641 }
642	NV_ERROR(dev, "unknown offset nv10_ctx_regs %d\n", reg);
643 return -1;
644}
645
646static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
647{
648 int i;
649 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
650 if (nv17_graph_ctx_regs[i] == reg)
651 return i;
652 }
653	NV_ERROR(dev, "unknown offset nv17_ctx_regs %d\n", reg);
654 return -1;
655}
656
657int nv10_graph_load_context(struct nouveau_channel *chan)
658{
659 struct drm_device *dev = chan->dev;
660 struct drm_nouveau_private *dev_priv = dev->dev_private;
661 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
662 uint32_t tmp;
663 int i;
664
665 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
666 nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
667 if (dev_priv->chipset >= 0x17) {
668 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
669 nv_wr32(dev, nv17_graph_ctx_regs[i],
670 pgraph_ctx->nv17[i]);
671 }
672
673 nv10_graph_load_pipe(chan);
674
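	/* Bind PGRAPH to the new channel; the active channel id lives in
	 * bits 31:24 of NV10_PGRAPH_CTX_USER. */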
675 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
676 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
677 nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
678 tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
679 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
680 return 0;
681}
682
683int
684nv10_graph_unload_context(struct drm_device *dev)
685{
686 struct drm_nouveau_private *dev_priv = dev->dev_private;
687 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
688 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
689 struct nouveau_channel *chan;
690 struct graph_state *ctx;
691 uint32_t tmp;
692 int i;
693
694 chan = pgraph->channel(dev);
695 if (!chan)
696 return 0;
697 ctx = chan->pgraph_ctx;
698
699 for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
700 ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
701
702 if (dev_priv->chipset >= 0x17) {
703 for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
704 ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
705 }
706
707 nv10_graph_save_pipe(chan);
708
709 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
710 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
711 tmp |= (pfifo->channels - 1) << 24;
712 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
713 return 0;
714}
715
716void
717nv10_graph_context_switch(struct drm_device *dev)
718{
719 struct drm_nouveau_private *dev_priv = dev->dev_private;
720 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
721 struct nouveau_channel *chan = NULL;
722 int chid;
723
724 pgraph->fifo_access(dev, false);
725 nouveau_wait_for_idle(dev);
726
727 /* If previous context is valid, we need to save it */
728 nv10_graph_unload_context(dev);
729
730 /* Load context for next channel */
731 chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
732 chan = dev_priv->fifos[chid];
733 if (chan)
734 nv10_graph_load_context(chan);
735
736 pgraph->fifo_access(dev, true);
737}
738
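/* Update a register's value in the software context image; the
 * find_offset() helpers return the register's index into the
 * corresponding ctx_regs table, or -1 if the register isn't part of
 * the saved context. */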
739#define NV_WRITE_CTX(reg, val) do { \
740 int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
741 if (offset > 0) \
742 pgraph_ctx->nv10[offset] = val; \
743 } while (0)
744
745#define NV17_WRITE_CTX(reg, val) do { \
746 int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
747 if (offset > 0) \
748 pgraph_ctx->nv17[offset] = val; \
749 } while (0)
750
751struct nouveau_channel *
752nv10_graph_channel(struct drm_device *dev)
753{
754 struct drm_nouveau_private *dev_priv = dev->dev_private;
755 int chid = dev_priv->engine.fifo.channels;
756
757 if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
758 chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
759
760 if (chid >= dev_priv->engine.fifo.channels)
761 return NULL;
762
763 return dev_priv->fifos[chid];
764}
765
766int nv10_graph_create_context(struct nouveau_channel *chan)
767{
768 struct drm_device *dev = chan->dev;
769 struct drm_nouveau_private *dev_priv = dev->dev_private;
770 struct graph_state *pgraph_ctx;
771
772 NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
773
774 chan->pgraph_ctx = pgraph_ctx = kzalloc(sizeof(*pgraph_ctx),
775 GFP_KERNEL);
776 if (pgraph_ctx == NULL)
777 return -ENOMEM;
778
779
780 NV_WRITE_CTX(0x00400e88, 0x08000000);
781 NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
782 NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
783 NV_WRITE_CTX(0x00400e10, 0x00001000);
784 NV_WRITE_CTX(0x00400e14, 0x00001000);
785 NV_WRITE_CTX(0x00400e30, 0x00080008);
786 NV_WRITE_CTX(0x00400e34, 0x00080008);
787 if (dev_priv->chipset >= 0x17) {
788		/* is this really needed? */
789 NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
790 nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
791 NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
792 NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
793 NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
794 NV17_WRITE_CTX(0x00400ec0, 0x00000080);
795 NV17_WRITE_CTX(0x00400ed0, 0x00000080);
796 }
797 NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
798
799 nv10_graph_create_pipe(chan);
800 return 0;
801}
802
803void nv10_graph_destroy_context(struct nouveau_channel *chan)
804{
805 struct graph_state *pgraph_ctx = chan->pgraph_ctx;
806
807 kfree(pgraph_ctx);
808 chan->pgraph_ctx = NULL;
809}
810
811int nv10_graph_init(struct drm_device *dev)
812{
813 struct drm_nouveau_private *dev_priv = dev->dev_private;
814 uint32_t tmp;
815 int i;
816
817 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
818 ~NV_PMC_ENABLE_PGRAPH);
819 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
820 NV_PMC_ENABLE_PGRAPH);
821
822	nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
823 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
824
825 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
826 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
827 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
828 /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
829 nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
830 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
831 (1<<29) |
832 (1<<31));
833 if (dev_priv->chipset >= 0x17) {
834 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
835 nv_wr32(dev, 0x004006b0, 0x40000020);
836 } else
837 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
838
839 /* copy tile info from PFB */
840 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
841 nv_wr32(dev, NV10_PGRAPH_TILE(i),
842 nv_rd32(dev, NV10_PFB_TILE(i)));
843 nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
844 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
845 nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
846 nv_rd32(dev, NV10_PFB_TSIZE(i)));
847 nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
848 nv_rd32(dev, NV10_PFB_TSTATUS(i)));
849 }
850
851 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
852 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
853 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
854 nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
855	nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
856
857 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
858 tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
859 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
860 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
861 nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
862
863 return 0;
864}
865
866void nv10_graph_takedown(struct drm_device *dev)
867{
868}
869
870struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
871 { 0x0030, false, NULL }, /* null */
872 { 0x0039, false, NULL }, /* m2mf */
873 { 0x004a, false, NULL }, /* gdirect */
874 { 0x005f, false, NULL }, /* imageblit */
875 { 0x009f, false, NULL }, /* imageblit (nv12) */
876 { 0x008a, false, NULL }, /* ifc */
877 { 0x0089, false, NULL }, /* sifm */
878 { 0x0062, false, NULL }, /* surf2d */
879 { 0x0043, false, NULL }, /* rop */
880 { 0x0012, false, NULL }, /* beta1 */
881 { 0x0072, false, NULL }, /* beta4 */
882 { 0x0019, false, NULL }, /* cliprect */
883 { 0x0044, false, NULL }, /* pattern */
884 { 0x0052, false, NULL }, /* swzsurf */
885 { 0x0093, false, NULL }, /* surf3d */
886 { 0x0094, false, NULL }, /* tex_tri */
887 { 0x0095, false, NULL }, /* multitex_tri */
888	{ 0x0056, false, NULL }, /* celsius (nv10) */
889	{ 0x0096, false, NULL }, /* celsius (nv11) */
890	{ 0x0099, false, NULL }, /* celsius (nv17) */
891 {}
892};
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv17_gpio.c
new file mode 100644
index 000000000000..2e58c331e9b7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_gpio.c
@@ -0,0 +1,92 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_hw.h"
30
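/* Map a DCB GPIO line onto the CRTC register, shift and mask that
 * control it: lines 0-1 live in NV_PCRTC_GPIO (16 bits apart), lines
 * 2-9 in NV_PCRTC_GPIO_EXT and lines 10-13 in NV_PCRTC_850 (4 bits
 * apart each). */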
31static bool
32get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
33 uint32_t *mask)
34{
35 if (ent->line < 2) {
36 *reg = NV_PCRTC_GPIO;
37 *shift = ent->line * 16;
38 *mask = 0x11;
39
40 } else if (ent->line < 10) {
41 *reg = NV_PCRTC_GPIO_EXT;
42 *shift = (ent->line - 2) * 4;
43 *mask = 0x3;
44
45 } else if (ent->line < 14) {
46 *reg = NV_PCRTC_850;
47 *shift = (ent->line - 10) * 4;
48 *mask = 0x3;
49
50 } else {
51 return false;
52 }
53
54 return true;
55}
56
57int
58nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
59{
60 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
61 uint32_t reg, shift, mask, value;
62
63 if (!ent)
64 return -ENODEV;
65
66 if (!get_gpio_location(ent, &reg, &shift, &mask))
67 return -ENODEV;
68
69 value = NVReadCRTC(dev, 0, reg) >> shift;
70
71 return (ent->invert ? 1 : 0) ^ (value & 1);
72}
73
74int
75nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
76{
77 struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
78 uint32_t reg, shift, mask, value;
79
80 if (!ent)
81 return -ENODEV;
82
83 if (!get_gpio_location(ent, &reg, &shift, &mask))
84 return -ENODEV;
85
86 value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
87 mask = ~(mask << shift);
88
89 NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
90
91 return 0;
92}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
new file mode 100644
index 000000000000..46cfd9c60478
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -0,0 +1,681 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_connector.h"
32#include "nouveau_crtc.h"
33#include "nouveau_hw.h"
34#include "nv17_tv.h"
35
36enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
37 struct drm_connector *connector,
38 uint32_t pin_mask)
39{
40 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
41
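	/* The load-detect sense bits for the TV DAC pins are in the top
	 * nibble of pin_mask; bit 0 of the nibble is masked off as it
	 * presumably doesn't correspond to a TV output pin. */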
42 tv_enc->pin_mask = pin_mask >> 28 & 0xe;
43
44 switch (tv_enc->pin_mask) {
45 case 0x2:
46 case 0x4:
47 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite;
48 break;
49 case 0xc:
50 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
51 break;
52 case 0xe:
53 if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
54 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
55 else
56 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
57 break;
58 default:
59 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
60 break;
61 }
62
63 drm_connector_property_set_value(connector,
64 encoder->dev->mode_config.tv_subconnector_property,
65 tv_enc->subconnector);
66
67 return tv_enc->subconnector ? connector_status_connected :
68 connector_status_disconnected;
69}
70
71static const struct {
72 int hdisplay;
73 int vdisplay;
74} modes[] = {
75 { 640, 400 },
76 { 640, 480 },
77 { 720, 480 },
78 { 720, 576 },
79 { 800, 600 },
80 { 1024, 768 },
81 { 1280, 720 },
82 { 1280, 1024 },
83 { 1920, 1080 }
84};
85
86static int nv17_tv_get_modes(struct drm_encoder *encoder,
87 struct drm_connector *connector)
88{
89 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
90 struct drm_display_mode *mode;
91 struct drm_display_mode *output_mode;
92 int n = 0;
93 int i;
94
95 if (tv_norm->kind != CTV_ENC_MODE) {
96 struct drm_display_mode *tv_mode;
97
98 for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) {
99 mode = drm_mode_duplicate(encoder->dev, tv_mode);
100
101 mode->clock = tv_norm->tv_enc_mode.vrefresh *
102 mode->htotal / 1000 *
103 mode->vtotal / 1000;
104
105 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
106 mode->clock *= 2;
107
108 if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay &&
109 mode->vdisplay == tv_norm->tv_enc_mode.vdisplay)
110 mode->type |= DRM_MODE_TYPE_PREFERRED;
111
112 drm_mode_probed_add(connector, mode);
113 n++;
114 }
115 return n;
116 }
117
118 /* tv_norm->kind == CTV_ENC_MODE */
119 output_mode = &tv_norm->ctv_enc_mode.mode;
120 for (i = 0; i < ARRAY_SIZE(modes); i++) {
121 if (modes[i].hdisplay > output_mode->hdisplay ||
122 modes[i].vdisplay > output_mode->vdisplay)
123 continue;
124
125 if (modes[i].hdisplay == output_mode->hdisplay &&
126 modes[i].vdisplay == output_mode->vdisplay) {
127 mode = drm_mode_duplicate(encoder->dev, output_mode);
128 mode->type |= DRM_MODE_TYPE_PREFERRED;
129 } else {
130 mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay,
131 modes[i].vdisplay, 60, false,
132 output_mode->flags & DRM_MODE_FLAG_INTERLACE,
133 false);
134 }
135
136 /* CVT modes are sometimes unsuitable... */
137 if (output_mode->hdisplay <= 720
138 || output_mode->hdisplay >= 1920) {
139 mode->htotal = output_mode->htotal;
140 mode->hsync_start = (mode->hdisplay + (mode->htotal
141 - mode->hdisplay) * 9 / 10) & ~7;
142 mode->hsync_end = mode->hsync_start + 8;
143 }
144 if (output_mode->vdisplay >= 1024) {
145 mode->vtotal = output_mode->vtotal;
146 mode->vsync_start = output_mode->vsync_start;
147 mode->vsync_end = output_mode->vsync_end;
148 }
149
150 mode->type |= DRM_MODE_TYPE_DRIVER;
151 drm_mode_probed_add(connector, mode);
152 n++;
153 }
154 return n;
155}
156
157static int nv17_tv_mode_valid(struct drm_encoder *encoder,
158 struct drm_display_mode *mode)
159{
160 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
161
162 if (tv_norm->kind == CTV_ENC_MODE) {
163 struct drm_display_mode *output_mode =
164 &tv_norm->ctv_enc_mode.mode;
165
166 if (mode->clock > 400000)
167 return MODE_CLOCK_HIGH;
168
169 if (mode->hdisplay > output_mode->hdisplay ||
170 mode->vdisplay > output_mode->vdisplay)
171 return MODE_BAD;
172
173 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) !=
174 (output_mode->flags & DRM_MODE_FLAG_INTERLACE))
175 return MODE_NO_INTERLACE;
176
177 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
178 return MODE_NO_DBLESCAN;
179
180 } else {
181 const int vsync_tolerance = 600;
182
183 if (mode->clock > 70000)
184 return MODE_CLOCK_HIGH;
185
186 if (abs(drm_mode_vrefresh(mode) * 1000 -
187 tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance)
188 return MODE_VSYNC;
189
190 /* The encoder takes care of the actual interlacing */
191 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
192 return MODE_NO_INTERLACE;
193 }
194
195 return MODE_OK;
196}
197
198static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
199 struct drm_display_mode *mode,
200 struct drm_display_mode *adjusted_mode)
201{
202 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
203
204 if (tv_norm->kind == CTV_ENC_MODE)
205 adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
206 else
207 adjusted_mode->clock = 90000;
208
209 return true;
210}
211
212static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
213{
214 struct drm_device *dev = encoder->dev;
215 struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
216 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
217
218 if (nouveau_encoder(encoder)->last_dpms == mode)
219 return;
220 nouveau_encoder(encoder)->last_dpms = mode;
221
222 NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
223 mode, nouveau_encoder(encoder)->dcb->index);
224
225 regs->ptv_200 &= ~1;
226
227 if (tv_norm->kind == CTV_ENC_MODE) {
228 nv04_dfp_update_fp_control(encoder, mode);
229
230 } else {
231 nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF);
232
233 if (mode == DRM_MODE_DPMS_ON)
234 regs->ptv_200 |= 1;
235 }
236
237 nv_load_ptv(dev, regs, 200);
238
239 nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
240 nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
241
242 nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
243}
244
245static void nv17_tv_prepare(struct drm_encoder *encoder)
246{
247 struct drm_device *dev = encoder->dev;
248 struct drm_nouveau_private *dev_priv = dev->dev_private;
249 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
250 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
251 int head = nouveau_crtc(encoder->crtc)->index;
252 uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[
253 NV_CIO_CRE_LCD__INDEX];
254 uint32_t dacclk_off = NV_PRAMDAC_DACCLK +
255 nv04_dac_output_offset(encoder);
256 uint32_t dacclk;
257
258 helper->dpms(encoder, DRM_MODE_DPMS_OFF);
259
260 nv04_dfp_disable(dev, head);
261
262 /* Unbind any FP encoders from this head if we need the FP
263 * stuff enabled. */
264 if (tv_norm->kind == CTV_ENC_MODE) {
265 struct drm_encoder *enc;
266
267 list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
268 struct dcb_entry *dcb = nouveau_encoder(enc)->dcb;
269
270 if ((dcb->type == OUTPUT_TMDS ||
271 dcb->type == OUTPUT_LVDS) &&
272 !enc->crtc &&
273 nv04_dfp_get_bound_head(dev, dcb) == head) {
274 nv04_dfp_bind_head(dev, dcb, head ^ 1,
275 dev_priv->VBIOS.fp.dual_link);
276 }
277 }
278
279 }
280
281 /* Some NV4x have unknown values (0x3f, 0x50, 0x54, 0x6b, 0x79, 0x7f)
282 * at LCD__INDEX which we don't alter
283 */
284 if (!(*cr_lcd & 0x44)) {
285 if (tv_norm->kind == CTV_ENC_MODE)
286 *cr_lcd = 0x1 | (head ? 0x0 : 0x8);
287 else
288 *cr_lcd = 0;
289 }
290
291 /* Set the DACCLK register */
292 dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
293
294 if (dev_priv->card_type == NV_40)
295 dacclk |= 0x1a << 16;
296
297 if (tv_norm->kind == CTV_ENC_MODE) {
298 dacclk |= 0x20;
299
300 if (head)
301 dacclk |= 0x100;
302 else
303 dacclk &= ~0x100;
304
305 } else {
306 dacclk |= 0x10;
307
308 }
309
310 NVWriteRAMDAC(dev, 0, dacclk_off, dacclk);
311}
312
313static void nv17_tv_mode_set(struct drm_encoder *encoder,
314 struct drm_display_mode *drm_mode,
315 struct drm_display_mode *adjusted_mode)
316{
317 struct drm_device *dev = encoder->dev;
318 struct drm_nouveau_private *dev_priv = dev->dev_private;
319 int head = nouveau_crtc(encoder->crtc)->index;
320 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
321 struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state;
322 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
323 int i;
324
325 regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */
326 regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */
327 regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) */
328 regs->tv_setup = 1;
329 regs->ramdac_8c0 = 0x0;
330
331 if (tv_norm->kind == TV_ENC_MODE) {
332 tv_regs->ptv_200 = 0x13111100;
333 if (head)
334 tv_regs->ptv_200 |= 0x10;
335
336 tv_regs->ptv_20c = 0x808010;
337 tv_regs->ptv_304 = 0x2d00000;
338 tv_regs->ptv_600 = 0x0;
339 tv_regs->ptv_60c = 0x0;
340 tv_regs->ptv_610 = 0x1e00000;
341
342 if (tv_norm->tv_enc_mode.vdisplay == 576) {
343 tv_regs->ptv_508 = 0x1200000;
344 tv_regs->ptv_614 = 0x33;
345
346 } else if (tv_norm->tv_enc_mode.vdisplay == 480) {
347 tv_regs->ptv_508 = 0xf00000;
348 tv_regs->ptv_614 = 0x13;
349 }
350
351 if (dev_priv->card_type >= NV_30) {
352 tv_regs->ptv_500 = 0xe8e0;
353 tv_regs->ptv_504 = 0x1710;
354 tv_regs->ptv_604 = 0x0;
355 tv_regs->ptv_608 = 0x0;
356 } else {
357 if (tv_norm->tv_enc_mode.vdisplay == 576) {
358 tv_regs->ptv_604 = 0x20;
359 tv_regs->ptv_608 = 0x10;
360 tv_regs->ptv_500 = 0x19710;
361 tv_regs->ptv_504 = 0x68f0;
362
363 } else if (tv_norm->tv_enc_mode.vdisplay == 480) {
364 tv_regs->ptv_604 = 0x10;
365 tv_regs->ptv_608 = 0x20;
366 tv_regs->ptv_500 = 0x4b90;
367 tv_regs->ptv_504 = 0x1b480;
368 }
369 }
370
371 for (i = 0; i < 0x40; i++)
372 tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i];
373
374 } else {
375 struct drm_display_mode *output_mode =
376 &tv_norm->ctv_enc_mode.mode;
377
378	/* The registers in PRAMDAC+0xc00 control some timings and CSC
379	 * parameters for the CTV encoder (it's only used for "HD" TV
380	 * modes; I don't think I have enough working to guess what
381	 * they mean exactly). It's probably connected at the output
382	 * of the FP encoder, but it also needs the analog encoder in
383	 * its OR enabled and routed to the head it's using. It's
384	 * enabled with bits [5:4] of the DACCLK register.
385	 */
386 for (i = 0; i < 38; i++)
387 regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i];
388
389 regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1;
390 regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
391 regs->fp_horiz_regs[FP_SYNC_START] =
392 output_mode->hsync_start - 1;
393 regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
394 regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay +
395 max((output_mode->hdisplay-600)/40 - 1, 1);
396
397 regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1;
398 regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1;
399 regs->fp_vert_regs[FP_SYNC_START] =
400 output_mode->vsync_start - 1;
401 regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1;
402 regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1;
403
404 regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
405 NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
406 NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12;
407
408 if (output_mode->flags & DRM_MODE_FLAG_PVSYNC)
409 regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS;
410 if (output_mode->flags & DRM_MODE_FLAG_PHSYNC)
411 regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS;
412
413 regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND |
414 NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND |
415 NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR |
416 NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR |
417 NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED |
418 NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE |
419 NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE;
420
421 regs->fp_debug_2 = 0;
422
423 regs->fp_margin_color = 0x801080;
424
425 }
426}
427
428static void nv17_tv_commit(struct drm_encoder *encoder)
429{
430 struct drm_device *dev = encoder->dev;
431 struct drm_nouveau_private *dev_priv = dev->dev_private;
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
433 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
434 struct drm_encoder_helper_funcs *helper = encoder->helper_private;
435
436 if (get_tv_norm(encoder)->kind == TV_ENC_MODE) {
437 nv17_tv_update_rescaler(encoder);
438 nv17_tv_update_properties(encoder);
439 } else {
440 nv17_ctv_update_rescaler(encoder);
441 }
442
443 nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
444
445 /* This could use refinement for flatpanels, but it should work */
446 if (dev_priv->chipset < 0x44)
447 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
448 nv04_dac_output_offset(encoder),
449 0xf0000000);
450 else
451 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
452 nv04_dac_output_offset(encoder),
453 0x00100000);
454
455 helper->dpms(encoder, DRM_MODE_DPMS_ON);
456
457 NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
458 drm_get_connector_name(
459 &nouveau_encoder_connector_get(nv_encoder)->base),
460 nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
461}
462
463static void nv17_tv_save(struct drm_encoder *encoder)
464{
465 struct drm_device *dev = encoder->dev;
466 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
467
468 nouveau_encoder(encoder)->restore.output =
469 NVReadRAMDAC(dev, 0,
470 NV_PRAMDAC_DACCLK +
471 nv04_dac_output_offset(encoder));
472
473 nv17_tv_state_save(dev, &tv_enc->saved_state);
474
475 tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200;
476}
477
478static void nv17_tv_restore(struct drm_encoder *encoder)
479{
480 struct drm_device *dev = encoder->dev;
481
482 NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK +
483 nv04_dac_output_offset(encoder),
484 nouveau_encoder(encoder)->restore.output);
485
486 nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state);
487}
488
489static int nv17_tv_create_resources(struct drm_encoder *encoder,
490 struct drm_connector *connector)
491{
492 struct drm_device *dev = encoder->dev;
493 struct drm_mode_config *conf = &dev->mode_config;
494 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
495 struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
496 int num_tv_norms = dcb->tvconf.has_component_output ? NUM_TV_NORMS :
497 NUM_LD_TV_NORMS;
498 int i;
499
500 if (nouveau_tv_norm) {
501 for (i = 0; i < num_tv_norms; i++) {
502 if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) {
503 tv_enc->tv_norm = i;
504 break;
505 }
506 }
507
508 if (i == num_tv_norms)
509 NV_WARN(dev, "Invalid TV norm setting \"%s\"\n",
510 nouveau_tv_norm);
511 }
512
513 drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names);
514
515 drm_connector_attach_property(connector,
516 conf->tv_select_subconnector_property,
517 tv_enc->select_subconnector);
518 drm_connector_attach_property(connector,
519 conf->tv_subconnector_property,
520 tv_enc->subconnector);
521 drm_connector_attach_property(connector,
522 conf->tv_mode_property,
523 tv_enc->tv_norm);
524 drm_connector_attach_property(connector,
525 conf->tv_flicker_reduction_property,
526 tv_enc->flicker);
527 drm_connector_attach_property(connector,
528 conf->tv_saturation_property,
529 tv_enc->saturation);
530 drm_connector_attach_property(connector,
531 conf->tv_hue_property,
532 tv_enc->hue);
533 drm_connector_attach_property(connector,
534 conf->tv_overscan_property,
535 tv_enc->overscan);
536
537 return 0;
538}
539
540static int nv17_tv_set_property(struct drm_encoder *encoder,
541 struct drm_connector *connector,
542 struct drm_property *property,
543 uint64_t val)
544{
545 struct drm_mode_config *conf = &encoder->dev->mode_config;
546 struct drm_crtc *crtc = encoder->crtc;
547 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
548 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
549 bool modes_changed = false;
550
551 if (property == conf->tv_overscan_property) {
552 tv_enc->overscan = val;
553 if (encoder->crtc) {
554 if (tv_norm->kind == CTV_ENC_MODE)
555 nv17_ctv_update_rescaler(encoder);
556 else
557 nv17_tv_update_rescaler(encoder);
558 }
559
560 } else if (property == conf->tv_saturation_property) {
561 if (tv_norm->kind != TV_ENC_MODE)
562 return -EINVAL;
563
564 tv_enc->saturation = val;
565 nv17_tv_update_properties(encoder);
566
567 } else if (property == conf->tv_hue_property) {
568 if (tv_norm->kind != TV_ENC_MODE)
569 return -EINVAL;
570
571 tv_enc->hue = val;
572 nv17_tv_update_properties(encoder);
573
574 } else if (property == conf->tv_flicker_reduction_property) {
575 if (tv_norm->kind != TV_ENC_MODE)
576 return -EINVAL;
577
578 tv_enc->flicker = val;
579 if (encoder->crtc)
580 nv17_tv_update_rescaler(encoder);
581
582 } else if (property == conf->tv_mode_property) {
583 if (connector->dpms != DRM_MODE_DPMS_OFF)
584 return -EINVAL;
585
586 tv_enc->tv_norm = val;
587
588 modes_changed = true;
589
590 } else if (property == conf->tv_select_subconnector_property) {
591 if (tv_norm->kind != TV_ENC_MODE)
592 return -EINVAL;
593
594 tv_enc->select_subconnector = val;
595 nv17_tv_update_properties(encoder);
596
597 } else {
598 return -EINVAL;
599 }
600
601 if (modes_changed) {
602 drm_helper_probe_single_connector_modes(connector, 0, 0);
603
604 /* Disable the crtc to ensure a full modeset is
605 * performed whenever it's turned on again. */
606 if (crtc) {
607 struct drm_mode_set modeset = {
608 .crtc = crtc,
609 };
610
611 crtc->funcs->set_config(&modeset);
612 }
613 }
614
615 return 0;
616}
617
618static void nv17_tv_destroy(struct drm_encoder *encoder)
619{
620 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
621
622 NV_DEBUG(encoder->dev, "\n");
623
624 drm_encoder_cleanup(encoder);
625 kfree(tv_enc);
626}
627
628static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
629 .dpms = nv17_tv_dpms,
630 .save = nv17_tv_save,
631 .restore = nv17_tv_restore,
632 .mode_fixup = nv17_tv_mode_fixup,
633 .prepare = nv17_tv_prepare,
634 .commit = nv17_tv_commit,
635 .mode_set = nv17_tv_mode_set,
636 .detect = nv17_dac_detect,
637};
638
639static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {
640 .get_modes = nv17_tv_get_modes,
641 .mode_valid = nv17_tv_mode_valid,
642 .create_resources = nv17_tv_create_resources,
643 .set_property = nv17_tv_set_property,
644};
645
646static struct drm_encoder_funcs nv17_tv_funcs = {
647 .destroy = nv17_tv_destroy,
648};
649
650int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
651{
652 struct drm_encoder *encoder;
653 struct nv17_tv_encoder *tv_enc = NULL;
654
655 tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL);
656 if (!tv_enc)
657 return -ENOMEM;
658
659 tv_enc->overscan = 50;
660 tv_enc->flicker = 50;
661 tv_enc->saturation = 50;
662 tv_enc->hue = 0;
663 tv_enc->tv_norm = TV_NORM_PAL;
664 tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
665 tv_enc->select_subconnector = DRM_MODE_SUBCONNECTOR_Automatic;
666 tv_enc->pin_mask = 0;
667
668 encoder = to_drm_encoder(&tv_enc->base);
669
670 tv_enc->base.dcb = entry;
671 tv_enc->base.or = ffs(entry->or) - 1;
672
673 drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC);
674 drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs);
675 to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs;
676
677 encoder->possible_crtcs = entry->heads;
678 encoder->possible_clones = 0;
679
680 return 0;
681}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.h b/drivers/gpu/drm/nouveau/nv17_tv.h
new file mode 100644
index 000000000000..c00977cedabd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv.h
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV17_TV_H__
28#define __NV17_TV_H__
29
30struct nv17_tv_state {
31 uint8_t tv_enc[0x40];
32
33 uint32_t hfilter[4][7];
34 uint32_t hfilter2[4][7];
35 uint32_t vfilter[4][7];
36
37 uint32_t ptv_200;
38 uint32_t ptv_204;
39 uint32_t ptv_208;
40 uint32_t ptv_20c;
41 uint32_t ptv_304;
42 uint32_t ptv_500;
43 uint32_t ptv_504;
44 uint32_t ptv_508;
45 uint32_t ptv_600;
46 uint32_t ptv_604;
47 uint32_t ptv_608;
48 uint32_t ptv_60c;
49 uint32_t ptv_610;
50 uint32_t ptv_614;
51};
52
53enum nv17_tv_norm {
54 TV_NORM_PAL,
55 TV_NORM_PAL_M,
56 TV_NORM_PAL_N,
57 TV_NORM_PAL_NC,
58 TV_NORM_NTSC_M,
59 TV_NORM_NTSC_J,
60 NUM_LD_TV_NORMS,
61 TV_NORM_HD480I = NUM_LD_TV_NORMS,
62 TV_NORM_HD480P,
63 TV_NORM_HD576I,
64 TV_NORM_HD576P,
65 TV_NORM_HD720P,
66 TV_NORM_HD1080I,
67 NUM_TV_NORMS
68};
69
70struct nv17_tv_encoder {
71 struct nouveau_encoder base;
72
73 struct nv17_tv_state state;
74 struct nv17_tv_state saved_state;
75
76 int overscan;
77 int flicker;
78 int saturation;
79 int hue;
80 enum nv17_tv_norm tv_norm;
81 int subconnector;
82 int select_subconnector;
83 uint32_t pin_mask;
84};
85#define to_tv_enc(x) container_of(nouveau_encoder(x), \
86 struct nv17_tv_encoder, base)
87
88extern char *nv17_tv_norm_names[NUM_TV_NORMS];
89
90extern struct nv17_tv_norm_params {
91 enum {
92 TV_ENC_MODE,
93 CTV_ENC_MODE,
94 } kind;
95
96 union {
97 struct {
98 int hdisplay;
99 int vdisplay;
100			int vrefresh; /* vertical refresh rate, in millihertz */
101
102 uint8_t tv_enc[0x40];
103 } tv_enc_mode;
104
105 struct {
106 struct drm_display_mode mode;
107
108 uint32_t ctv_regs[38];
109 } ctv_enc_mode;
110 };
111
112} nv17_tv_norms[NUM_TV_NORMS];
113#define get_tv_norm(enc) (&nv17_tv_norms[to_tv_enc(enc)->tv_norm])
114
115extern struct drm_display_mode nv17_tv_modes[];
116
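/* Piecewise-linear interpolation for the 0-100 property scales:
 * returns y0 at x = 0, y1 at x = 50 and y2 at x = 100, e.g.
 * interpolate(0x100, 0xe1, 0xc1, 50) == 0xe1. */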
117static inline int interpolate(int y0, int y1, int y2, int x)
118{
119 return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
120}
121
122void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state);
123void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state);
124void nv17_tv_update_properties(struct drm_encoder *encoder);
125void nv17_tv_update_rescaler(struct drm_encoder *encoder);
126void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
127
128/* TV hardware access functions */
129
130static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg, uint32_t val)
131{
132 nv_wr32(dev, reg, val);
133}
134
135static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
136{
137 return nv_rd32(dev, reg);
138}
139
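/* The TV encoder's own registers are accessed indirectly through an
 * index/data register pair. */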
140static inline void nv_write_tv_enc(struct drm_device *dev, uint8_t reg, uint8_t val)
141{
142 nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
143 nv_write_ptv(dev, NV_PTV_TV_DATA, val);
144}
145
146static inline uint8_t nv_read_tv_enc(struct drm_device *dev, uint8_t reg)
147{
148 nv_write_ptv(dev, NV_PTV_TV_INDEX, reg);
149 return nv_read_ptv(dev, NV_PTV_TV_DATA);
150}
151
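/* Convenience macros: the register offset is passed as a bare hex
 * number and token-pasted both into the register address
 * (NV_PTV_OFFSET + 0x200) and into the state field name (ptv_200). */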
152#define nv_load_ptv(dev, state, reg) nv_write_ptv(dev, NV_PTV_OFFSET + 0x##reg, state->ptv_##reg)
153#define nv_save_ptv(dev, state, reg) state->ptv_##reg = nv_read_ptv(dev, NV_PTV_OFFSET + 0x##reg)
154#define nv_load_tv_enc(dev, state, reg) nv_write_tv_enc(dev, 0x##reg, state->tv_enc[0x##reg])
155
156#endif
diff --git a/drivers/gpu/drm/nouveau/nv17_tv_modes.c b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
new file mode 100644
index 000000000000..d64683d97e0d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv17_tv_modes.c
@@ -0,0 +1,583 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29#include "nouveau_drv.h"
30#include "nouveau_encoder.h"
31#include "nouveau_crtc.h"
32#include "nouveau_hw.h"
33#include "nv17_tv.h"
34
35char *nv17_tv_norm_names[NUM_TV_NORMS] = {
36 [TV_NORM_PAL] = "PAL",
37 [TV_NORM_PAL_M] = "PAL-M",
38 [TV_NORM_PAL_N] = "PAL-N",
39 [TV_NORM_PAL_NC] = "PAL-Nc",
40 [TV_NORM_NTSC_M] = "NTSC-M",
41 [TV_NORM_NTSC_J] = "NTSC-J",
42 [TV_NORM_HD480I] = "hd480i",
43 [TV_NORM_HD480P] = "hd480p",
44 [TV_NORM_HD576I] = "hd576i",
45 [TV_NORM_HD576P] = "hd576p",
46 [TV_NORM_HD720P] = "hd720p",
47 [TV_NORM_HD1080I] = "hd1080i"
48};
49
50/* TV standard specific parameters */
51
52struct nv17_tv_norm_params nv17_tv_norms[NUM_TV_NORMS] = {
53 [TV_NORM_PAL] = { TV_ENC_MODE, {
54 .tv_enc_mode = { 720, 576, 50000, {
55 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
56 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
57 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
58 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
59 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
60 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
61 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
62 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
63 } } } },
64
65 [TV_NORM_PAL_M] = { TV_ENC_MODE, {
66 .tv_enc_mode = { 720, 480, 59940, {
67 0x21, 0xe6, 0xef, 0xe3, 0x0, 0x0, 0xb, 0x18,
68 0x7e, 0x44, 0x76, 0x32, 0x25, 0x0, 0x3c, 0x0,
69 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
70 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
71 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
72 0x0, 0x18, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
73 0x0, 0xb4, 0x0, 0x15, 0x40, 0x10, 0x0, 0x9c,
74 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
75 } } } },
76
77 [TV_NORM_PAL_N] = { TV_ENC_MODE, {
78 .tv_enc_mode = { 720, 576, 50000, {
79 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
80 0x7e, 0x40, 0x8a, 0x32, 0x25, 0x0, 0x3c, 0x0,
81 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
82 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
83 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
84 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
85 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
86 0xbd, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
87 } } } },
88
89 [TV_NORM_PAL_NC] = { TV_ENC_MODE, {
90 .tv_enc_mode = { 720, 576, 50000, {
91 0x21, 0xf6, 0x94, 0x46, 0x0, 0x0, 0xb, 0x18,
92 0x7e, 0x44, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
93 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
94 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
95 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
96 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
97 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
98 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
99 } } } },
100
101 [TV_NORM_NTSC_M] = { TV_ENC_MODE, {
102 .tv_enc_mode = { 720, 480, 59940, {
103 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
104 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x3c, 0x0,
105 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
106 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
107 0xc5, 0x4, 0xc5, 0x1, 0x2, 0x0, 0xa, 0x5,
108 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
109 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0x9c,
110 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
111 } } } },
112
113 [TV_NORM_NTSC_J] = { TV_ENC_MODE, {
114 .tv_enc_mode = { 720, 480, 59940, {
115 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
116 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
117 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
118 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
119 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
120 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
121 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
122 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
123 } } } },
124
125 [TV_NORM_HD480I] = { TV_ENC_MODE, {
126 .tv_enc_mode = { 720, 480, 59940, {
127 0x21, 0xf0, 0x7c, 0x1f, 0x0, 0x0, 0xb, 0x18,
128 0x7e, 0x44, 0x76, 0x48, 0x0, 0x0, 0x32, 0x0,
129 0x3c, 0x0, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x83,
130 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x1,
131 0xcf, 0x4, 0xcf, 0x1, 0x2, 0x0, 0xa, 0x5,
132 0x0, 0x16, 0xff, 0x3, 0x20, 0xf, 0x78, 0x0,
133 0x0, 0xb4, 0x0, 0x15, 0x4, 0x10, 0x0, 0xa4,
134 0xc8, 0x15, 0x5, 0x15, 0x3c, 0x0, 0x0, 0x0
135 } } } },
136
137 [TV_NORM_HD576I] = { TV_ENC_MODE, {
138 .tv_enc_mode = { 720, 576, 50000, {
139 0x2a, 0x9, 0x8a, 0xcb, 0x0, 0x0, 0xb, 0x18,
140 0x7e, 0x40, 0x8a, 0x35, 0x27, 0x0, 0x34, 0x3,
141 0x3e, 0x3, 0x17, 0x21, 0x1b, 0x1b, 0x24, 0x9c,
142 0x1, 0x0, 0xf, 0xf, 0x60, 0x5, 0xd3, 0x3,
143 0xd3, 0x4, 0xd4, 0x1, 0x2, 0x0, 0xa, 0x5,
144 0x0, 0x1a, 0xff, 0x3, 0x18, 0xf, 0x78, 0x0,
145 0x0, 0xb4, 0x0, 0x15, 0x49, 0x10, 0x0, 0x9b,
146 0xbd, 0x15, 0x5, 0x15, 0x3e, 0x3, 0x0, 0x0
147 } } } },
148
149
150 [TV_NORM_HD480P] = { CTV_ENC_MODE, {
151 .ctv_enc_mode = {
152 .mode = { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000,
153 720, 735, 743, 858, 0, 480, 490, 494, 525, 0,
154 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
155 .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
156 0x354003a, 0x40000, 0x6f0344, 0x18100000,
157 0x10160004, 0x10060005, 0x1006000c, 0x10060020,
158 0x10060021, 0x140e0022, 0x10060202, 0x1802020a,
159 0x1810020b, 0x10000fff, 0x10000fff, 0x10000fff,
160 0x10000fff, 0x10000fff, 0x10000fff, 0x70,
161 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
162 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
163 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
164 } } } },
165
166 [TV_NORM_HD576P] = { CTV_ENC_MODE, {
167 .ctv_enc_mode = {
168 .mode = { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000,
169 720, 730, 738, 864, 0, 576, 581, 585, 625, 0,
170 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
171 .ctv_regs = { 0x3540000, 0x0, 0x0, 0x314,
172 0x354003a, 0x40000, 0x6f0344, 0x18100000,
173 0x10060001, 0x10060009, 0x10060026, 0x10060027,
174 0x140e0028, 0x10060268, 0x1810026d, 0x10000fff,
175 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff,
176 0x10000fff, 0x10000fff, 0x10000fff, 0x69,
177 0x3ff0000, 0x57, 0x2e001e, 0x258012c,
178 0xa0aa04ec, 0x30, 0x80960019, 0x12c0300,
179 0x2019, 0x600, 0x32060019, 0x0, 0x0, 0x400
180 } } } },
181
182 [TV_NORM_HD720P] = { CTV_ENC_MODE, {
183 .ctv_enc_mode = {
184 .mode = { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250,
185 1280, 1349, 1357, 1650, 0, 720, 725, 730, 750, 0,
186 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
187 .ctv_regs = { 0x1260394, 0x0, 0x0, 0x622,
188 0x66b0021, 0x6004a, 0x1210626, 0x8170000,
189 0x70004, 0x70016, 0x70017, 0x40f0018,
190 0x702e8, 0x81702ed, 0xfff, 0xfff,
191 0xfff, 0xfff, 0xfff, 0xfff,
192 0xfff, 0xfff, 0xfff, 0x0,
193 0x2e40001, 0x58, 0x2e001e, 0x258012c,
194 0xa0aa04ec, 0x30, 0x810c0039, 0x12c0300,
195 0xc0002039, 0x600, 0x32060039, 0x0, 0x0, 0x0
196 } } } },
197
198 [TV_NORM_HD1080I] = { CTV_ENC_MODE, {
199 .ctv_enc_mode = {
200 .mode = { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250,
201 1920, 1961, 2049, 2200, 0, 1080, 1084, 1088, 1125, 0,
202 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
203 | DRM_MODE_FLAG_INTERLACE) },
204 .ctv_regs = { 0xac0420, 0x44c0478, 0x4a4, 0x4fc0868,
205 0x8940028, 0x60054, 0xe80870, 0xbf70000,
206 0xbc70004, 0x70005, 0x70012, 0x70013,
207 0x40f0014, 0x70230, 0xbf70232, 0xbf70233,
208 0x1c70237, 0x70238, 0x70244, 0x70245,
209 0x40f0246, 0x70462, 0x1f70464, 0x0,
210 0x2e40001, 0x58, 0x2e001e, 0x258012c,
211 0xa0aa04ec, 0x30, 0x815f004c, 0x12c0300,
212 0xc000204c, 0x600, 0x3206004c, 0x0, 0x0, 0x0
213 } } } }
214};
215
216/*
217 * The following is some guesswork on how the TV encoder flicker
218 * filter/rescaler works:
219 *
220 * It seems to use some sort of resampling filter. It is controlled
221 * through the registers at NV_PTV_HFILTER and NV_PTV_VFILTER, which
222 * control the horizontal and vertical stages respectively. There is
223 * also NV_PTV_HFILTER2, which the blob fills identically to
224 * NV_PTV_HFILTER, but it seems to do nothing. A rough guess is that
225 * it could be used to control the filtering of each interlaced
226 * field independently, but I don't know how it is enabled. The
227 * whole filtering process seems to be disabled with bits 26:27 of
228 * PTV_200, but we aren't doing that.
229 *
230 * The layout of both register sets is the same:
231 *
232 * A: [BASE+0x18]...[BASE+0x0] [BASE+0x58]..[BASE+0x40]
233 * B: [BASE+0x34]...[BASE+0x1c] [BASE+0x74]..[BASE+0x5c]
234 *
235 * Each coefficient is stored in bits [31],[15:9] in two's complement
236 * format. They seem to be some kind of weights used in a low-pass
237 * filter. Both A and B coefficients are applied to the 14 nearest
238 * samples on each side (listed from nearest to furthest; they
239 * roughly cover 2 framebuffer pixels on each side). They are
240 * probably multiplied with some more hardwired weights before being
241 * used: B-coefficients are applied the same on both sides,
242 * A-coefficients are inverted before being applied to the opposite
243 * side.
244 *
245 * After all the hassle, I got the following formula by empirical
246 * means...
247 */
248
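/* Map the 0-100 overscan property onto the scale factor used by the
 * filter setup: 0x100 at 0, 0xe1 at the default of 50 and 0xc1 at
 * 100, interpolating linearly in between. */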
249#define calc_overscan(o) interpolate(0x100, 0xe1, 0xc1, o)
250
251#define id1 (1LL << 8)
252#define id2 (1LL << 16)
253#define id3 (1LL << 24)
254#define id4 (1LL << 32)
255#define id5 (1LL << 48)
256
257static struct filter_params {
258 int64_t k1;
259 int64_t ki;
260 int64_t ki2;
261 int64_t ki3;
262 int64_t kr;
263 int64_t kir;
264 int64_t ki2r;
265 int64_t ki3r;
266 int64_t kf;
267 int64_t kif;
268 int64_t ki2f;
269 int64_t ki3f;
270 int64_t krf;
271 int64_t kirf;
272 int64_t ki2rf;
273 int64_t ki3rf;
274} fparams[2][4] = {
275 /* Horizontal filter parameters */
276 {
277 {64.311690 * id5, -39.516924 * id5, 6.586143 * id5, 0.000002 * id5,
278 0.051285 * id4, 26.168746 * id4, -4.361449 * id4, -0.000001 * id4,
279 9.308169 * id3, 78.180965 * id3, -13.030158 * id3, -0.000001 * id3,
280 -8.801540 * id1, -46.572890 * id1, 7.762145 * id1, -0.000000 * id1},
281 {-44.565569 * id5, -68.081246 * id5, 39.812074 * id5, -4.009316 * id5,
282 29.832207 * id4, 50.047322 * id4, -25.380017 * id4, 2.546422 * id4,
283 104.605622 * id3, 141.908641 * id3, -74.322319 * id3, 7.484316 * id3,
284 -37.081621 * id1, -90.397510 * id1, 42.784229 * id1, -4.289952 * id1},
285 {-56.793244 * id5, 31.153584 * id5, -5.192247 * id5, -0.000003 * id5,
286 33.541131 * id4, -34.149302 * id4, 5.691537 * id4, 0.000002 * id4,
287 87.196610 * id3, -88.995169 * id3, 14.832456 * id3, 0.000012 * id3,
288 17.288138 * id1, 71.864786 * id1, -11.977408 * id1, -0.000009 * id1},
289 {51.787796 * id5, 21.211771 * id5, -18.993730 * id5, 1.853310 * id5,
290 -41.470726 * id4, -17.775823 * id4, 13.057821 * id4, -1.15823 * id4,
291 -154.235673 * id3, -44.878641 * id3, 40.656077 * id3, -3.695595 * id3,
292 112.201065 * id1, 39.992155 * id1, -25.155714 * id1, 2.113984 * id1},
293 },
294
295 /* Vertical filter parameters */
296 {
297 {67.601979 * id5, 0.428319 * id5, -0.071318 * id5, -0.000012 * id5,
298 -3.402339 * id4, 0.000209 * id4, -0.000092 * id4, 0.000010 * id4,
299 -9.180996 * id3, 6.111270 * id3, -1.024457 * id3, 0.001043 * id3,
300 6.060315 * id1, -0.017425 * id1, 0.007830 * id1, -0.000869 * id1},
301 {6.755647 * id5, 5.841348 * id5, 1.469734 * id5, -0.149656 * id5,
302 8.293120 * id4, -1.192888 * id4, -0.947652 * id4, 0.094507 * id4,
303 37.526655 * id3, 10.257875 * id3, -10.823275 * id3, 1.081497 * id3,
304 -2.361928 * id1, -2.059432 * id1, 1.840671 * id1, -0.168100 * id1},
305 {-14.780391 * id5, -16.042148 * id5, 2.673692 * id5, -0.000000 * id5,
306 39.541978 * id4, 5.680053 * id4, -0.946676 * id4, 0.000000 * id4,
307 152.994486 * id3, 12.625439 * id3, -2.119579 * id3, 0.002708 * id3,
308 -38.125089 * id1, -0.855880 * id1, 0.155359 * id1, -0.002245 * id1},
309 {-27.476193 * id5, -1.454976 * id5, 1.286557 * id5, 0.025346 * id5,
310 20.687300 * id4, 3.014003 * id4, -0.557786 * id4, -0.01311 * id4,
311 60.008737 * id3, -0.738273 * id3, 5.408217 * id3, -0.796798 * id3,
312 -17.296835 * id1, 4.438577 * id1, -2.809420 * id1, 0.385491 * id1},
313 }
314};
315
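/* Compute the filter coefficients: each of the 2x4x7 values is a
 * cubic polynomial in the tap index i whose coefficients (the
 * fparams entries above, in fixed point) depend bilinearly on the
 * rescaling ratio rs and the flicker strength. The result is rounded
 * and packed into the sign bit [31] and magnitude bits [15:9] that
 * the hardware seems to expect. */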
316static void tv_setup_filter(struct drm_encoder *encoder)
317{
318 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
319 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
320 struct drm_display_mode *mode = &encoder->crtc->mode;
321 uint32_t (*filters[])[4][7] = {&tv_enc->state.hfilter,
322 &tv_enc->state.vfilter};
323 int i, j, k;
324 int32_t overscan = calc_overscan(tv_enc->overscan);
325 int64_t flicker = (tv_enc->flicker - 50) * (id3 / 100);
326 uint64_t rs[] = {mode->hdisplay * id3,
327 mode->vdisplay * id3};
328
329 do_div(rs[0], overscan * tv_norm->tv_enc_mode.hdisplay);
330 do_div(rs[1], overscan * tv_norm->tv_enc_mode.vdisplay);
331
332 for (k = 0; k < 2; k++) {
333 rs[k] = max((int64_t)rs[k], id2);
334
335 for (j = 0; j < 4; j++) {
336 struct filter_params *p = &fparams[k][j];
337
338 for (i = 0; i < 7; i++) {
339 int64_t c = (p->k1 + p->ki*i + p->ki2*i*i + p->ki3*i*i*i)
340 + (p->kr + p->kir*i + p->ki2r*i*i + p->ki3r*i*i*i)*rs[k]
341 + (p->kf + p->kif*i + p->ki2f*i*i + p->ki3f*i*i*i)*flicker
342 + (p->krf + p->kirf*i + p->ki2rf*i*i + p->ki3rf*i*i*i)*flicker*rs[k];
343
344 (*filters[k])[j][i] = (c + id5/2) >> 39 & (0x1 << 31 | 0x7f << 9);
345 }
346 }
347 }
348}
349
350/* Hardware state saving/restoring */
351
352static void tv_save_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
353{
354 int i, j;
355 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
356
357 for (i = 0; i < 4; i++) {
358 for (j = 0; j < 7; j++)
359 regs[i][j] = nv_read_ptv(dev, offsets[i]+4*j);
360 }
361}
362
363static void tv_load_filter(struct drm_device *dev, uint32_t base, uint32_t regs[4][7])
364{
365 int i, j;
366 uint32_t offsets[] = { base, base + 0x1c, base + 0x40, base + 0x5c };
367
368 for (i = 0; i < 4; i++) {
369 for (j = 0; j < 7; j++)
370 nv_write_ptv(dev, offsets[i]+4*j, regs[i][j]);
371 }
372}
373
374void nv17_tv_state_save(struct drm_device *dev, struct nv17_tv_state *state)
375{
376 int i;
377
378 for (i = 0; i < 0x40; i++)
379 state->tv_enc[i] = nv_read_tv_enc(dev, i);
380
381 tv_save_filter(dev, NV_PTV_HFILTER, state->hfilter);
382 tv_save_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
383 tv_save_filter(dev, NV_PTV_VFILTER, state->vfilter);
384
385 nv_save_ptv(dev, state, 200);
386 nv_save_ptv(dev, state, 204);
387 nv_save_ptv(dev, state, 208);
388 nv_save_ptv(dev, state, 20c);
389 nv_save_ptv(dev, state, 304);
390 nv_save_ptv(dev, state, 500);
391 nv_save_ptv(dev, state, 504);
392 nv_save_ptv(dev, state, 508);
393 nv_save_ptv(dev, state, 600);
394 nv_save_ptv(dev, state, 604);
395 nv_save_ptv(dev, state, 608);
396 nv_save_ptv(dev, state, 60c);
397 nv_save_ptv(dev, state, 610);
398 nv_save_ptv(dev, state, 614);
399}
400
401void nv17_tv_state_load(struct drm_device *dev, struct nv17_tv_state *state)
402{
403 int i;
404
405 for (i = 0; i < 0x40; i++)
406 nv_write_tv_enc(dev, i, state->tv_enc[i]);
407
408 tv_load_filter(dev, NV_PTV_HFILTER, state->hfilter);
409 tv_load_filter(dev, NV_PTV_HFILTER2, state->hfilter2);
410 tv_load_filter(dev, NV_PTV_VFILTER, state->vfilter);
411
412 nv_load_ptv(dev, state, 200);
413 nv_load_ptv(dev, state, 204);
414 nv_load_ptv(dev, state, 208);
415 nv_load_ptv(dev, state, 20c);
416 nv_load_ptv(dev, state, 304);
417 nv_load_ptv(dev, state, 500);
418 nv_load_ptv(dev, state, 504);
419 nv_load_ptv(dev, state, 508);
420 nv_load_ptv(dev, state, 600);
421 nv_load_ptv(dev, state, 604);
422 nv_load_ptv(dev, state, 608);
423 nv_load_ptv(dev, state, 60c);
424 nv_load_ptv(dev, state, 610);
425 nv_load_ptv(dev, state, 614);
426
427 /* This is required for some settings to kick in. */
428 nv_write_tv_enc(dev, 0x3e, 1);
429 nv_write_tv_enc(dev, 0x3e, 0);
430}
431
432/* Timings similar to the ones the blob sets */
433
434struct drm_display_mode nv17_tv_modes[] = {
435 { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 0,
436 320, 344, 392, 560, 0, 200, 200, 202, 220, 0,
437 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
438 | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
439 { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 0,
440 320, 344, 392, 560, 0, 240, 240, 246, 263, 0,
441 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC
442 | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
443 { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 0,
444 400, 432, 496, 640, 0, 300, 300, 303, 314, 0,
445 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC
446 | DRM_MODE_FLAG_DBLSCAN | DRM_MODE_FLAG_CLKDIV2) },
447 { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 0,
448 640, 672, 768, 880, 0, 480, 480, 492, 525, 0,
449 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
450 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 0,
451 720, 752, 872, 960, 0, 480, 480, 493, 525, 0,
452 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
453 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 0,
454 720, 776, 856, 960, 0, 576, 576, 588, 597, 0,
455 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
456 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 0,
457 800, 840, 920, 1040, 0, 600, 600, 604, 618, 0,
458 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
459 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 0,
460 1024, 1064, 1200, 1344, 0, 768, 768, 777, 806, 0,
461 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
462 {}
463};
464
465void nv17_tv_update_properties(struct drm_encoder *encoder)
466{
467 struct drm_device *dev = encoder->dev;
468 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
469 struct nv17_tv_state *regs = &tv_enc->state;
470 struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
471 int subconnector = tv_enc->select_subconnector ?
472 tv_enc->select_subconnector :
473 tv_enc->subconnector;
474
475 switch (subconnector) {
476 case DRM_MODE_SUBCONNECTOR_Composite:
477 {
478 regs->ptv_204 = 0x2;
479
480 /* The composite connector may be found on either pin. */
481 if (tv_enc->pin_mask & 0x4)
482 regs->ptv_204 |= 0x010000;
483 else if (tv_enc->pin_mask & 0x2)
484 regs->ptv_204 |= 0x100000;
485 else
486 regs->ptv_204 |= 0x110000;
487
488 regs->tv_enc[0x7] = 0x10;
489 break;
490 }
491 case DRM_MODE_SUBCONNECTOR_SVIDEO:
492 regs->ptv_204 = 0x11012;
493 regs->tv_enc[0x7] = 0x18;
494 break;
495
496 case DRM_MODE_SUBCONNECTOR_Component:
497 regs->ptv_204 = 0x111333;
498 regs->tv_enc[0x7] = 0x14;
499 break;
500
501 case DRM_MODE_SUBCONNECTOR_SCART:
502 regs->ptv_204 = 0x111012;
503 regs->tv_enc[0x7] = 0x18;
504 break;
505 }
506
507 regs->tv_enc[0x20] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x20], 255,
508 tv_enc->saturation);
509 regs->tv_enc[0x22] = interpolate(0, tv_norm->tv_enc_mode.tv_enc[0x22], 255,
510 tv_enc->saturation);
511 regs->tv_enc[0x25] = tv_enc->hue * 255 / 100;
512
513 nv_load_ptv(dev, regs, 204);
514 nv_load_tv_enc(dev, regs, 7);
515 nv_load_tv_enc(dev, regs, 20);
516 nv_load_tv_enc(dev, regs, 22);
517 nv_load_tv_enc(dev, regs, 25);
518}
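/* interpolate() above is assumed to be the small piecewise-linear helper
 * from nv17_tv.h, roughly
 *
 *	static inline int interpolate(int y0, int y1, int y2, int x)
 *	{
 *		return y1 + (x < 50 ? y1 - y0 : y2 - y1) * (x - 50) / 50;
 *	}
 *
 * so interpolate(0, norm_default, 255, tv_enc->saturation) returns 0 at
 * saturation 0, the norm's default value at 50 and 255 at 100.
 */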
519
520void nv17_tv_update_rescaler(struct drm_encoder *encoder)
521{
522 struct drm_device *dev = encoder->dev;
523 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
524 struct nv17_tv_state *regs = &tv_enc->state;
525
526 regs->ptv_208 = 0x40 | (calc_overscan(tv_enc->overscan) << 8);
527
528 tv_setup_filter(encoder);
529
530 nv_load_ptv(dev, regs, 208);
531 tv_load_filter(dev, NV_PTV_HFILTER, regs->hfilter);
532 tv_load_filter(dev, NV_PTV_HFILTER2, regs->hfilter2);
533 tv_load_filter(dev, NV_PTV_VFILTER, regs->vfilter);
534}
535
536void nv17_ctv_update_rescaler(struct drm_encoder *encoder)
537{
538 struct drm_device *dev = encoder->dev;
539 struct drm_nouveau_private *dev_priv = dev->dev_private;
540 struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
541 int head = nouveau_crtc(encoder->crtc)->index;
542 struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head];
543 struct drm_display_mode *crtc_mode = &encoder->crtc->mode;
544 struct drm_display_mode *output_mode = &get_tv_norm(encoder)->ctv_enc_mode.mode;
545 int overscan, hmargin, vmargin, hratio, vratio;
546
547 /* The rescaler doesn't do the right thing for interlaced modes. */
548 if (output_mode->flags & DRM_MODE_FLAG_INTERLACE)
549 overscan = 100;
550 else
551 overscan = tv_enc->overscan;
552
553 hmargin = (output_mode->hdisplay - crtc_mode->hdisplay) / 2;
554 vmargin = (output_mode->vdisplay - crtc_mode->vdisplay) / 2;
555
556 hmargin = interpolate(0, min(hmargin, output_mode->hdisplay/20), hmargin,
557 overscan);
558 vmargin = interpolate(0, min(vmargin, output_mode->vdisplay/20), vmargin,
559 overscan);
560
561 hratio = crtc_mode->hdisplay * 0x800 / (output_mode->hdisplay - 2*hmargin);
562 vratio = crtc_mode->vdisplay * 0x800 / (output_mode->vdisplay - 2*vmargin) & ~3;
563
564 regs->fp_horiz_regs[FP_VALID_START] = hmargin;
565 regs->fp_horiz_regs[FP_VALID_END] = output_mode->hdisplay - hmargin - 1;
566 regs->fp_vert_regs[FP_VALID_START] = vmargin;
567 regs->fp_vert_regs[FP_VALID_END] = output_mode->vdisplay - vmargin - 1;
568
569 regs->fp_debug_1 = NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE |
570 XLATE(vratio, 0, NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE) |
571 NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE |
572 XLATE(hratio, 0, NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE);
573
574 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_START,
575 regs->fp_horiz_regs[FP_VALID_START]);
576 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HVALID_END,
577 regs->fp_horiz_regs[FP_VALID_END]);
578 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_START,
579 regs->fp_vert_regs[FP_VALID_START]);
580 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_VVALID_END,
581 regs->fp_vert_regs[FP_VALID_END]);
582 NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regs->fp_debug_1);
583}
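/* A worked example of the scaler math above, with hypothetical numbers:
 * a 640x480 CRTC mode inside a 720x576 output mode at overscan 0 leaves
 * hmargin = vmargin = 0, so hratio = 640 * 0x800 / 720 = 0x71c, i.e.
 * the X rescaler runs at 640/720 of the output rate in units of 1/2048.
 */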
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 000000000000..18ba74f19703
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,780 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6/*
7 * NV20
8 * -----
9 * There are 3 families:
10 * NV20 is 0x10de:0x020*
11 * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
12 * NV2A is 0x10de:0x02A0
13 *
14 * NV30
15 * -----
16 * There are 3 families:
17 * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
18 * NV34 is 0x10de:0x032*
19 * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
20 *
21 * Not seen in the wild, no dumps (probably NV35):
22 * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
23 * NV38 is 0x10de:0x0333, 0x10de:0x00fe
24 *
25 */
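/* For example, a board with PCI ID 0x10de:0x0281 falls in the 0x028*
 * range above, i.e. the NV25/28 family, and takes the NV25_GRCTX_SIZE
 * path in nv20_graph_create_context() below.
 */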
26
27#define NV20_GRCTX_SIZE (3580*4)
28#define NV25_GRCTX_SIZE (3529*4)
29#define NV2A_GRCTX_SIZE (3500*4)
30
31#define NV30_31_GRCTX_SIZE (24392)
32#define NV34_GRCTX_SIZE (18140)
33#define NV35_36_GRCTX_SIZE (22396)
34
35static void
36nv20_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
37{
38 int i;
39
40 nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
41 nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
42 nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
43 nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
44 nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
45 nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
46 for (i = 0x04d4; i <= 0x04e0; i += 4)
47 nv_wo32(dev, ctx, i/4, 0x00030303);
48 for (i = 0x04f4; i <= 0x0500; i += 4)
49 nv_wo32(dev, ctx, i/4, 0x00080000);
50 for (i = 0x050c; i <= 0x0518; i += 4)
51 nv_wo32(dev, ctx, i/4, 0x01012000);
52 for (i = 0x051c; i <= 0x0528; i += 4)
53 nv_wo32(dev, ctx, i/4, 0x000105b8);
54 for (i = 0x052c; i <= 0x0538; i += 4)
55 nv_wo32(dev, ctx, i/4, 0x00080008);
56 for (i = 0x055c; i <= 0x0598; i += 4)
57 nv_wo32(dev, ctx, i/4, 0x07ff0000);
58 nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
59 nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
60 nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
61 nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
62 nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
63 nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
64 for (i = 0x1c1c; i <= 0x248c; i += 16) {
65 nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
66 nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
67 nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
68 }
69 nv_wo32(dev, ctx, 0x281c/4, 0x3f800000);
70 nv_wo32(dev, ctx, 0x2830/4, 0x3f800000);
71 nv_wo32(dev, ctx, 0x285c/4, 0x40000000);
72 nv_wo32(dev, ctx, 0x2860/4, 0x3f800000);
73 nv_wo32(dev, ctx, 0x2864/4, 0x3f000000);
74 nv_wo32(dev, ctx, 0x286c/4, 0x40000000);
75 nv_wo32(dev, ctx, 0x2870/4, 0x3f800000);
76 nv_wo32(dev, ctx, 0x2878/4, 0xbf800000);
77 nv_wo32(dev, ctx, 0x2880/4, 0xbf800000);
78 nv_wo32(dev, ctx, 0x34a4/4, 0x000fe000);
79 nv_wo32(dev, ctx, 0x3530/4, 0x000003f8);
80 nv_wo32(dev, ctx, 0x3540/4, 0x002fe000);
81 for (i = 0x355c; i <= 0x3578; i += 4)
82 nv_wo32(dev, ctx, i/4, 0x001c527c);
83}
84
85static void
86nv25_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
87{
88 int i;
89
90 nv_wo32(dev, ctx, 0x035c/4, 0xffff0000);
91 nv_wo32(dev, ctx, 0x03c0/4, 0x0fff0000);
92 nv_wo32(dev, ctx, 0x03c4/4, 0x0fff0000);
93 nv_wo32(dev, ctx, 0x049c/4, 0x00000101);
94 nv_wo32(dev, ctx, 0x04b0/4, 0x00000111);
95 nv_wo32(dev, ctx, 0x04c8/4, 0x00000080);
96 nv_wo32(dev, ctx, 0x04cc/4, 0xffff0000);
97 nv_wo32(dev, ctx, 0x04d0/4, 0x00000001);
98 nv_wo32(dev, ctx, 0x04e4/4, 0x44400000);
99 nv_wo32(dev, ctx, 0x04fc/4, 0x4b800000);
100 for (i = 0x0510; i <= 0x051c; i += 4)
101 nv_wo32(dev, ctx, i/4, 0x00030303);
102 for (i = 0x0530; i <= 0x053c; i += 4)
103 nv_wo32(dev, ctx, i/4, 0x00080000);
104 for (i = 0x0548; i <= 0x0554; i += 4)
105 nv_wo32(dev, ctx, i/4, 0x01012000);
106 for (i = 0x0558; i <= 0x0564; i += 4)
107 nv_wo32(dev, ctx, i/4, 0x000105b8);
108 for (i = 0x0568; i <= 0x0574; i += 4)
109 nv_wo32(dev, ctx, i/4, 0x00080008);
110 for (i = 0x0598; i <= 0x05d4; i += 4)
111 nv_wo32(dev, ctx, i/4, 0x07ff0000);
112 nv_wo32(dev, ctx, 0x05e0/4, 0x4b7fffff);
113 nv_wo32(dev, ctx, 0x0620/4, 0x00000080);
114 nv_wo32(dev, ctx, 0x0624/4, 0x30201000);
115 nv_wo32(dev, ctx, 0x0628/4, 0x70605040);
116 nv_wo32(dev, ctx, 0x062c/4, 0xb0a09080);
117 nv_wo32(dev, ctx, 0x0630/4, 0xf0e0d0c0);
118 nv_wo32(dev, ctx, 0x0664/4, 0x00000001);
119 nv_wo32(dev, ctx, 0x066c/4, 0x00004000);
120 nv_wo32(dev, ctx, 0x0678/4, 0x00000001);
121 nv_wo32(dev, ctx, 0x0680/4, 0x00040000);
122 nv_wo32(dev, ctx, 0x0684/4, 0x00010000);
123 for (i = 0x1b04; i <= 0x2374; i += 16) {
124 nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
125 nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
126 nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
127 }
128 nv_wo32(dev, ctx, 0x2704/4, 0x3f800000);
129 nv_wo32(dev, ctx, 0x2718/4, 0x3f800000);
130 nv_wo32(dev, ctx, 0x2744/4, 0x40000000);
131 nv_wo32(dev, ctx, 0x2748/4, 0x3f800000);
132 nv_wo32(dev, ctx, 0x274c/4, 0x3f000000);
133 nv_wo32(dev, ctx, 0x2754/4, 0x40000000);
134 nv_wo32(dev, ctx, 0x2758/4, 0x3f800000);
135 nv_wo32(dev, ctx, 0x2760/4, 0xbf800000);
136 nv_wo32(dev, ctx, 0x2768/4, 0xbf800000);
137 nv_wo32(dev, ctx, 0x308c/4, 0x000fe000);
138 nv_wo32(dev, ctx, 0x3108/4, 0x000003f8);
139 nv_wo32(dev, ctx, 0x3468/4, 0x002fe000);
140 for (i = 0x3484; i <= 0x34a0; i += 4)
141 nv_wo32(dev, ctx, i/4, 0x001c527c);
142}
143
144static void
145nv2a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
146{
147 int i;
148
149 nv_wo32(dev, ctx, 0x033c/4, 0xffff0000);
150 nv_wo32(dev, ctx, 0x03a0/4, 0x0fff0000);
151 nv_wo32(dev, ctx, 0x03a4/4, 0x0fff0000);
152 nv_wo32(dev, ctx, 0x047c/4, 0x00000101);
153 nv_wo32(dev, ctx, 0x0490/4, 0x00000111);
154 nv_wo32(dev, ctx, 0x04a8/4, 0x44400000);
155 for (i = 0x04d4; i <= 0x04e0; i += 4)
156 nv_wo32(dev, ctx, i/4, 0x00030303);
157 for (i = 0x04f4; i <= 0x0500; i += 4)
158 nv_wo32(dev, ctx, i/4, 0x00080000);
159 for (i = 0x050c; i <= 0x0518; i += 4)
160 nv_wo32(dev, ctx, i/4, 0x01012000);
161 for (i = 0x051c; i <= 0x0528; i += 4)
162 nv_wo32(dev, ctx, i/4, 0x000105b8);
163 for (i = 0x052c; i <= 0x0538; i += 4)
164 nv_wo32(dev, ctx, i/4, 0x00080008);
165 for (i = 0x055c; i <= 0x0598; i += 4)
166 nv_wo32(dev, ctx, i/4, 0x07ff0000);
167 nv_wo32(dev, ctx, 0x05a4/4, 0x4b7fffff);
168 nv_wo32(dev, ctx, 0x05fc/4, 0x00000001);
169 nv_wo32(dev, ctx, 0x0604/4, 0x00004000);
170 nv_wo32(dev, ctx, 0x0610/4, 0x00000001);
171 nv_wo32(dev, ctx, 0x0618/4, 0x00040000);
172 nv_wo32(dev, ctx, 0x061c/4, 0x00010000);
173 for (i = 0x1a9c; i <= 0x22fc; i += 16) { /* XXX: check!! */
174 nv_wo32(dev, ctx, (i + 0)/4, 0x10700ff9);
175 nv_wo32(dev, ctx, (i + 4)/4, 0x0436086c);
176 nv_wo32(dev, ctx, (i + 8)/4, 0x000c001b);
177 }
178 nv_wo32(dev, ctx, 0x269c/4, 0x3f800000);
179 nv_wo32(dev, ctx, 0x26b0/4, 0x3f800000);
180 nv_wo32(dev, ctx, 0x26dc/4, 0x40000000);
181 nv_wo32(dev, ctx, 0x26e0/4, 0x3f800000);
182 nv_wo32(dev, ctx, 0x26e4/4, 0x3f000000);
183 nv_wo32(dev, ctx, 0x26ec/4, 0x40000000);
184 nv_wo32(dev, ctx, 0x26f0/4, 0x3f800000);
185 nv_wo32(dev, ctx, 0x26f8/4, 0xbf800000);
186 nv_wo32(dev, ctx, 0x2700/4, 0xbf800000);
187 nv_wo32(dev, ctx, 0x3024/4, 0x000fe000);
188 nv_wo32(dev, ctx, 0x30a0/4, 0x000003f8);
189 nv_wo32(dev, ctx, 0x33fc/4, 0x002fe000);
190 for (i = 0x341c; i <= 0x3438; i += 4)
191 nv_wo32(dev, ctx, i/4, 0x001c527c);
192}
193
194static void
195nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
196{
197 int i;
198
199 nv_wo32(dev, ctx, 0x0410/4, 0x00000101);
200 nv_wo32(dev, ctx, 0x0424/4, 0x00000111);
201 nv_wo32(dev, ctx, 0x0428/4, 0x00000060);
202 nv_wo32(dev, ctx, 0x0444/4, 0x00000080);
203 nv_wo32(dev, ctx, 0x0448/4, 0xffff0000);
204 nv_wo32(dev, ctx, 0x044c/4, 0x00000001);
205 nv_wo32(dev, ctx, 0x0460/4, 0x44400000);
206 nv_wo32(dev, ctx, 0x048c/4, 0xffff0000);
207 for (i = 0x04e0; i < 0x04e8; i += 4)
208 nv_wo32(dev, ctx, i/4, 0x0fff0000);
209 nv_wo32(dev, ctx, 0x04ec/4, 0x00011100);
210 for (i = 0x0508; i < 0x0548; i += 4)
211 nv_wo32(dev, ctx, i/4, 0x07ff0000);
212 nv_wo32(dev, ctx, 0x0550/4, 0x4b7fffff);
213 nv_wo32(dev, ctx, 0x058c/4, 0x00000080);
214 nv_wo32(dev, ctx, 0x0590/4, 0x30201000);
215 nv_wo32(dev, ctx, 0x0594/4, 0x70605040);
216 nv_wo32(dev, ctx, 0x0598/4, 0xb8a89888);
217 nv_wo32(dev, ctx, 0x059c/4, 0xf8e8d8c8);
218 nv_wo32(dev, ctx, 0x05b0/4, 0xb0000000);
219 for (i = 0x0600; i < 0x0640; i += 4)
220 nv_wo32(dev, ctx, i/4, 0x00010588);
221 for (i = 0x0640; i < 0x0680; i += 4)
222 nv_wo32(dev, ctx, i/4, 0x00030303);
223 for (i = 0x06c0; i < 0x0700; i += 4)
224 nv_wo32(dev, ctx, i/4, 0x0008aae4);
225 for (i = 0x0700; i < 0x0740; i += 4)
226 nv_wo32(dev, ctx, i/4, 0x01012000);
227 for (i = 0x0740; i < 0x0780; i += 4)
228 nv_wo32(dev, ctx, i/4, 0x00080008);
229 nv_wo32(dev, ctx, 0x085c/4, 0x00040000);
230 nv_wo32(dev, ctx, 0x0860/4, 0x00010000);
231 for (i = 0x0864; i < 0x0874; i += 4)
232 nv_wo32(dev, ctx, i/4, 0x00040004);
233 for (i = 0x1f18; i <= 0x3088; i += 16) {
234 nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
235 nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
236 nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
237 }
238 for (i = 0x30b8; i < 0x30c8; i += 4)
239 nv_wo32(dev, ctx, i/4, 0x0000ffff);
240 nv_wo32(dev, ctx, 0x344c/4, 0x3f800000);
241 nv_wo32(dev, ctx, 0x3808/4, 0x3f800000);
242 nv_wo32(dev, ctx, 0x381c/4, 0x3f800000);
243 nv_wo32(dev, ctx, 0x3848/4, 0x40000000);
244 nv_wo32(dev, ctx, 0x384c/4, 0x3f800000);
245 nv_wo32(dev, ctx, 0x3850/4, 0x3f000000);
246 nv_wo32(dev, ctx, 0x3858/4, 0x40000000);
247 nv_wo32(dev, ctx, 0x385c/4, 0x3f800000);
248 nv_wo32(dev, ctx, 0x3864/4, 0xbf800000);
249 nv_wo32(dev, ctx, 0x386c/4, 0xbf800000);
250}
251
252static void
253nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
254{
255 int i;
256
257 nv_wo32(dev, ctx, 0x040c/4, 0x01000101);
258 nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
259 nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
260 nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
261 nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
262 nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
263 nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
264 nv_wo32(dev, ctx, 0x0480/4, 0xffff0000);
265 for (i = 0x04d4; i < 0x04dc; i += 4)
266 nv_wo32(dev, ctx, i/4, 0x0fff0000);
267 nv_wo32(dev, ctx, 0x04e0/4, 0x00011100);
268 for (i = 0x04fc; i < 0x053c; i += 4)
269 nv_wo32(dev, ctx, i/4, 0x07ff0000);
270 nv_wo32(dev, ctx, 0x0544/4, 0x4b7fffff);
271 nv_wo32(dev, ctx, 0x057c/4, 0x00000080);
272 nv_wo32(dev, ctx, 0x0580/4, 0x30201000);
273 nv_wo32(dev, ctx, 0x0584/4, 0x70605040);
274 nv_wo32(dev, ctx, 0x0588/4, 0xb8a89888);
275 nv_wo32(dev, ctx, 0x058c/4, 0xf8e8d8c8);
276 nv_wo32(dev, ctx, 0x05a0/4, 0xb0000000);
277 for (i = 0x05f0; i < 0x0630; i += 4)
278 nv_wo32(dev, ctx, i/4, 0x00010588);
279 for (i = 0x0630; i < 0x0670; i += 4)
280 nv_wo32(dev, ctx, i/4, 0x00030303);
281 for (i = 0x06b0; i < 0x06f0; i += 4)
282 nv_wo32(dev, ctx, i/4, 0x0008aae4);
283 for (i = 0x06f0; i < 0x0730; i += 4)
284 nv_wo32(dev, ctx, i/4, 0x01012000);
285 for (i = 0x0730; i < 0x0770; i += 4)
286 nv_wo32(dev, ctx, i/4, 0x00080008);
287 nv_wo32(dev, ctx, 0x0850/4, 0x00040000);
288 nv_wo32(dev, ctx, 0x0854/4, 0x00010000);
289 for (i = 0x0858; i < 0x0868; i += 4)
290 nv_wo32(dev, ctx, i/4, 0x00040004);
291 for (i = 0x15ac; i <= 0x271c; i += 16) {
292 nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
293 nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
294 nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
295 }
296 for (i = 0x274c; i < 0x275c; i += 4)
297 nv_wo32(dev, ctx, i/4, 0x0000ffff);
298 nv_wo32(dev, ctx, 0x2ae0/4, 0x3f800000);
299 nv_wo32(dev, ctx, 0x2e9c/4, 0x3f800000);
300 nv_wo32(dev, ctx, 0x2eb0/4, 0x3f800000);
301 nv_wo32(dev, ctx, 0x2edc/4, 0x40000000);
302 nv_wo32(dev, ctx, 0x2ee0/4, 0x3f800000);
303 nv_wo32(dev, ctx, 0x2ee4/4, 0x3f000000);
304 nv_wo32(dev, ctx, 0x2eec/4, 0x40000000);
305 nv_wo32(dev, ctx, 0x2ef0/4, 0x3f800000);
306 nv_wo32(dev, ctx, 0x2ef8/4, 0xbf800000);
307 nv_wo32(dev, ctx, 0x2f00/4, 0xbf800000);
308}
309
310static void
311nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
312{
313 int i;
314
315 nv_wo32(dev, ctx, 0x040c/4, 0x00000101);
316 nv_wo32(dev, ctx, 0x0420/4, 0x00000111);
317 nv_wo32(dev, ctx, 0x0424/4, 0x00000060);
318 nv_wo32(dev, ctx, 0x0440/4, 0x00000080);
319 nv_wo32(dev, ctx, 0x0444/4, 0xffff0000);
320 nv_wo32(dev, ctx, 0x0448/4, 0x00000001);
321 nv_wo32(dev, ctx, 0x045c/4, 0x44400000);
322 nv_wo32(dev, ctx, 0x0488/4, 0xffff0000);
323 for (i = 0x04dc; i < 0x04e4; i += 4)
324 nv_wo32(dev, ctx, i/4, 0x0fff0000);
325 nv_wo32(dev, ctx, 0x04e8/4, 0x00011100);
326 for (i = 0x0504; i < 0x0544; i += 4)
327 nv_wo32(dev, ctx, i/4, 0x07ff0000);
328 nv_wo32(dev, ctx, 0x054c/4, 0x4b7fffff);
329 nv_wo32(dev, ctx, 0x0588/4, 0x00000080);
330 nv_wo32(dev, ctx, 0x058c/4, 0x30201000);
331 nv_wo32(dev, ctx, 0x0590/4, 0x70605040);
332 nv_wo32(dev, ctx, 0x0594/4, 0xb8a89888);
333 nv_wo32(dev, ctx, 0x0598/4, 0xf8e8d8c8);
334 nv_wo32(dev, ctx, 0x05ac/4, 0xb0000000);
335 for (i = 0x0604; i < 0x0644; i += 4)
336 nv_wo32(dev, ctx, i/4, 0x00010588);
337 for (i = 0x0644; i < 0x0684; i += 4)
338 nv_wo32(dev, ctx, i/4, 0x00030303);
339 for (i = 0x06c4; i < 0x0704; i += 4)
340 nv_wo32(dev, ctx, i/4, 0x0008aae4);
341 for (i = 0x0704; i < 0x0744; i += 4)
342 nv_wo32(dev, ctx, i/4, 0x01012000);
343 for (i = 0x0744; i < 0x0784; i += 4)
344 nv_wo32(dev, ctx, i/4, 0x00080008);
345 nv_wo32(dev, ctx, 0x0860/4, 0x00040000);
346 nv_wo32(dev, ctx, 0x0864/4, 0x00010000);
347 for (i = 0x0868; i < 0x0878; i += 4)
348 nv_wo32(dev, ctx, i/4, 0x00040004);
349 for (i = 0x1f1c; i <= 0x308c; i += 16) {
350 nv_wo32(dev, ctx, i/4 + 0, 0x10700ff9);
351 nv_wo32(dev, ctx, i/4 + 1, 0x0436086c);
352 nv_wo32(dev, ctx, i/4 + 2, 0x000c001b);
353 }
354 for (i = 0x30bc; i < 0x30cc; i += 4)
355 nv_wo32(dev, ctx, i/4, 0x0000ffff);
356 nv_wo32(dev, ctx, 0x3450/4, 0x3f800000);
357 nv_wo32(dev, ctx, 0x380c/4, 0x3f800000);
358 nv_wo32(dev, ctx, 0x3820/4, 0x3f800000);
359 nv_wo32(dev, ctx, 0x384c/4, 0x40000000);
360 nv_wo32(dev, ctx, 0x3850/4, 0x3f800000);
361 nv_wo32(dev, ctx, 0x3854/4, 0x3f000000);
362 nv_wo32(dev, ctx, 0x385c/4, 0x40000000);
363 nv_wo32(dev, ctx, 0x3860/4, 0x3f800000);
364 nv_wo32(dev, ctx, 0x3868/4, 0xbf800000);
365 nv_wo32(dev, ctx, 0x3870/4, 0xbf800000);
366}
367
368int
369nv20_graph_create_context(struct nouveau_channel *chan)
370{
371 struct drm_device *dev = chan->dev;
372 struct drm_nouveau_private *dev_priv = dev->dev_private;
373 void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
374 unsigned int ctx_size;
375 unsigned int idoffs = 0x28/4;
376 int ret;
377
378 switch (dev_priv->chipset) {
379 case 0x20:
380 ctx_size = NV20_GRCTX_SIZE;
381 ctx_init = nv20_graph_context_init;
382 idoffs = 0;
383 break;
384 case 0x25:
385 case 0x28:
386 ctx_size = NV25_GRCTX_SIZE;
387 ctx_init = nv25_graph_context_init;
388 break;
389 case 0x2a:
390 ctx_size = NV2A_GRCTX_SIZE;
391 ctx_init = nv2a_graph_context_init;
392 idoffs = 0;
393 break;
394 case 0x30:
395 case 0x31:
396 ctx_size = NV30_31_GRCTX_SIZE;
397 ctx_init = nv30_31_graph_context_init;
398 break;
399 case 0x34:
400 ctx_size = NV34_GRCTX_SIZE;
401 ctx_init = nv34_graph_context_init;
402 break;
403 case 0x35:
404 case 0x36:
405 ctx_size = NV35_36_GRCTX_SIZE;
406 ctx_init = nv35_36_graph_context_init;
407 break;
408 default:
409 ctx_size = 0;
410 ctx_init = nv35_36_graph_context_init;
411 NV_ERROR(dev, "Please contact the devs if you want your NV%x"
412 " card to work\n", dev_priv->chipset);
413 return -ENOSYS;
414 break;
415 }
416
417 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
418 NVOBJ_FLAG_ZERO_ALLOC,
419 &chan->ramin_grctx);
420 if (ret)
421 return ret;
422
423 /* Initialise default context values */
424 dev_priv->engine.instmem.prepare_access(dev, true);
425 ctx_init(dev, chan->ramin_grctx->gpuobj);
426
427 /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
428 nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
429 (chan->id << 24) | 0x1); /* CTX_USER */
430
431 nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
432 chan->ramin_grctx->instance >> 4);
433
434 dev_priv->engine.instmem.finish_access(dev);
435 return 0;
436}
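/* To make the CTX_USER write above concrete: for channel 5 on an NV25
 * (idoffs = 0x28/4), the word at byte offset 0x28 of the context becomes
 * (5 << 24) | 0x1 = 0x05000001; the high byte is the channel id and
 * bit 0 presumably marks the context valid.
 */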
437
438void
439nv20_graph_destroy_context(struct nouveau_channel *chan)
440{
441 struct drm_device *dev = chan->dev;
442 struct drm_nouveau_private *dev_priv = dev->dev_private;
443
444 if (chan->ramin_grctx)
445 nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
446
447 dev_priv->engine.instmem.prepare_access(dev, true);
448 nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
449 dev_priv->engine.instmem.finish_access(dev);
450}
451
452int
453nv20_graph_load_context(struct nouveau_channel *chan)
454{
455 struct drm_device *dev = chan->dev;
456 uint32_t inst;
457
458 if (!chan->ramin_grctx)
459 return -EINVAL;
460 inst = chan->ramin_grctx->instance >> 4;
461
462 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
463 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
464 NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
465 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
466
467 nouveau_wait_for_idle(dev);
468 return 0;
469}
470
471int
472nv20_graph_unload_context(struct drm_device *dev)
473{
474 struct drm_nouveau_private *dev_priv = dev->dev_private;
475 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
476 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
477 struct nouveau_channel *chan;
478 uint32_t inst, tmp;
479
480 chan = pgraph->channel(dev);
481 if (!chan)
482 return 0;
483 inst = chan->ramin_grctx->instance >> 4;
484
485 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
486 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
487 NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
488
489 nouveau_wait_for_idle(dev);
490
491 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
492 tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
493 tmp |= (pfifo->channels - 1) << 24;
494 nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
495 return 0;
496}
497
498static void
499nv20_graph_rdi(struct drm_device *dev)
500{
501 struct drm_nouveau_private *dev_priv = dev->dev_private;
502 int i, writecount = 32;
503 uint32_t rdi_index = 0x2c80000;
504
505 if (dev_priv->chipset == 0x20) {
506 rdi_index = 0x3d0000;
507 writecount = 15;
508 }
509
510 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
511 for (i = 0; i < writecount; i++)
512 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
513
514 nouveau_wait_for_idle(dev);
515}
516
517int
518nv20_graph_init(struct drm_device *dev)
519{
520 struct drm_nouveau_private *dev_priv =
521 (struct drm_nouveau_private *)dev->dev_private;
522 uint32_t tmp, vramsz;
523 int ret, i;
524
525 nv_wr32(dev, NV03_PMC_ENABLE,
526 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
527 nv_wr32(dev, NV03_PMC_ENABLE,
528 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
529
530 if (!dev_priv->ctx_table) {
531 /* Create Context Pointer Table */
532 dev_priv->ctx_table_size = 32 * 4;
533 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
534 dev_priv->ctx_table_size, 16,
535 NVOBJ_FLAG_ZERO_ALLOC,
536 &dev_priv->ctx_table);
537 if (ret)
538 return ret;
539 }
540
541 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
542 dev_priv->ctx_table->instance >> 4);
543
544 nv20_graph_rdi(dev);
545
546 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
547 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
548
549 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
550 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
551 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
552 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
553 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
554 nv_wr32(dev, 0x40009C , 0x00000040);
555
556 if (dev_priv->chipset >= 0x25) {
557 nv_wr32(dev, 0x400890, 0x00080000);
558 nv_wr32(dev, 0x400610, 0x304B1FB6);
559 nv_wr32(dev, 0x400B80, 0x18B82880);
560 nv_wr32(dev, 0x400B84, 0x44000000);
561 nv_wr32(dev, 0x400098, 0x40000080);
562 nv_wr32(dev, 0x400B88, 0x000000ff);
563 } else {
564 nv_wr32(dev, 0x400880, 0x00080000); /* 0x0008c7df */
565 nv_wr32(dev, 0x400094, 0x00000005);
566 nv_wr32(dev, 0x400B80, 0x45CAA208); /* 0x45eae20e */
567 nv_wr32(dev, 0x400B84, 0x24000000);
568 nv_wr32(dev, 0x400098, 0x00000040);
569 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
570 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
571 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
572 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
573 }
574
575 /* copy tile info from PFB */
576 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
577 nv_wr32(dev, 0x00400904 + i * 0x10,
578 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
579 /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
580 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
581 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
582 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
583 nv_wr32(dev, 0x00400908 + i * 0x10,
584 nv_rd32(dev, NV10_PFB_TSIZE(i)));
585 /* which is NV40_PGRAPH_TSIZE0(i) ?? */
586 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
587 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
588 nv_rd32(dev, NV10_PFB_TSIZE(i)));
589 nv_wr32(dev, 0x00400900 + i * 0x10,
590 nv_rd32(dev, NV10_PFB_TILE(i)));
591 /* which is NV40_PGRAPH_TILE0(i) ?? */
592 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
593 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
594 nv_rd32(dev, NV10_PFB_TILE(i)));
595 }
596 for (i = 0; i < 8; i++) {
597 nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
598 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
599 nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
600 nv_rd32(dev, 0x100300 + i * 4));
601 }
602 nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
603 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
604 nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
605
606 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
607 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
608
609 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
610 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
611 tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
612 nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
613
614 /* begin RAM config */
615 vramsz = drm_get_resource_len(dev, 0) - 1;
616 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
617 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
618 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
619 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
620 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
621 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
622 nv_wr32(dev, 0x400820, 0);
623 nv_wr32(dev, 0x400824, 0);
624 nv_wr32(dev, 0x400864, vramsz - 1);
625 nv_wr32(dev, 0x400868, vramsz - 1);
626
627 /* interesting.. the below overwrites some of the tile setup above.. */
628 nv_wr32(dev, 0x400B20, 0x00000000);
629 nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
630
631 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
632 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
633 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
634 nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
635
636 return 0;
637}
638
639void
640nv20_graph_takedown(struct drm_device *dev)
641{
642 struct drm_nouveau_private *dev_priv = dev->dev_private;
643
644 nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
645}
646
647int
648nv30_graph_init(struct drm_device *dev)
649{
650 struct drm_nouveau_private *dev_priv = dev->dev_private;
651 int ret, i;
652
653 nv_wr32(dev, NV03_PMC_ENABLE,
654 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
655 nv_wr32(dev, NV03_PMC_ENABLE,
656 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
657
658 if (!dev_priv->ctx_table) {
659 /* Create Context Pointer Table */
660 dev_priv->ctx_table_size = 32 * 4;
661 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
662 dev_priv->ctx_table_size, 16,
663 NVOBJ_FLAG_ZERO_ALLOC,
664 &dev_priv->ctx_table);
665 if (ret)
666 return ret;
667 }
668
669 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
670 dev_priv->ctx_table->instance >> 4);
671
672 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
673 nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
674
675 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
676 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
677 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
678 nv_wr32(dev, 0x400890, 0x01b463ff);
679 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
680 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
681 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
682 nv_wr32(dev, 0x400B80, 0x1003d888);
683 nv_wr32(dev, 0x400B84, 0x0c000000);
684 nv_wr32(dev, 0x400098, 0x00000000);
685 nv_wr32(dev, 0x40009C, 0x0005ad00);
686 nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
687 nv_wr32(dev, 0x4000a0, 0x00000000);
688 nv_wr32(dev, 0x4000a4, 0x00000008);
689 nv_wr32(dev, 0x4008a8, 0xb784a400);
690 nv_wr32(dev, 0x400ba0, 0x002f8685);
691 nv_wr32(dev, 0x400ba4, 0x00231f3f);
692 nv_wr32(dev, 0x4008a4, 0x40000020);
693
694 if (dev_priv->chipset == 0x34) {
695 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
696 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
697 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
698 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
699 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
700 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
701 nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
702 nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
703 }
704
705 nv_wr32(dev, 0x4000c0, 0x00000016);
706
707 /* copy tile info from PFB */
708 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
709 nv_wr32(dev, 0x00400904 + i * 0x10,
710 nv_rd32(dev, NV10_PFB_TLIMIT(i)));
711 /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
712 nv_wr32(dev, 0x00400908 + i * 0x10,
713 nv_rd32(dev, NV10_PFB_TSIZE(i)));
714 /* which is NV40_PGRAPH_TSIZE0(i) ?? */
715 nv_wr32(dev, 0x00400900 + i * 0x10,
716 nv_rd32(dev, NV10_PFB_TILE(i)));
717 /* which is NV40_PGRAPH_TILE0(i) ?? */
718 }
719
720 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
721 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
722 nv_wr32(dev, 0x0040075c , 0x00000001);
723
724 /* begin RAM config */
725 /* vramsz = drm_get_resource_len(dev, 0) - 1; */
726 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
727 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
728 if (dev_priv->chipset != 0x34) {
729 nv_wr32(dev, 0x400750, 0x00EA0000);
730 nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
731 nv_wr32(dev, 0x400750, 0x00EA0004);
732 nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
733 }
734
735 return 0;
736}
737
738struct nouveau_pgraph_object_class nv20_graph_grclass[] = {
739 { 0x0030, false, NULL }, /* null */
740 { 0x0039, false, NULL }, /* m2mf */
741 { 0x004a, false, NULL }, /* gdirect */
742 { 0x009f, false, NULL }, /* imageblit (nv12) */
743 { 0x008a, false, NULL }, /* ifc */
744 { 0x0089, false, NULL }, /* sifm */
745 { 0x0062, false, NULL }, /* surf2d */
746 { 0x0043, false, NULL }, /* rop */
747 { 0x0012, false, NULL }, /* beta1 */
748 { 0x0072, false, NULL }, /* beta4 */
749 { 0x0019, false, NULL }, /* cliprect */
750 { 0x0044, false, NULL }, /* pattern */
751 { 0x009e, false, NULL }, /* swzsurf */
752 { 0x0096, false, NULL }, /* celcius */
753 { 0x0097, false, NULL }, /* kelvin (nv20) */
754 { 0x0597, false, NULL }, /* kelvin (nv25) */
755 {}
756};
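/* Each entry above is assumed to follow nouveau_pgraph_object_class's
 * { class id, software flag, method list } layout; everything here is a
 * hardware class, hence the false/NULL pairs.
 */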
757
758struct nouveau_pgraph_object_class nv30_graph_grclass[] = {
759 { 0x0030, false, NULL }, /* null */
760 { 0x0039, false, NULL }, /* m2mf */
761 { 0x004a, false, NULL }, /* gdirect */
762 { 0x009f, false, NULL }, /* imageblit (nv12) */
763 { 0x008a, false, NULL }, /* ifc */
764 { 0x038a, false, NULL }, /* ifc (nv30) */
765 { 0x0089, false, NULL }, /* sifm */
766 { 0x0389, false, NULL }, /* sifm (nv30) */
767 { 0x0062, false, NULL }, /* surf2d */
768 { 0x0362, false, NULL }, /* surf2d (nv30) */
769 { 0x0043, false, NULL }, /* rop */
770 { 0x0012, false, NULL }, /* beta1 */
771 { 0x0072, false, NULL }, /* beta4 */
772 { 0x0019, false, NULL }, /* cliprect */
773 { 0x0044, false, NULL }, /* pattern */
774 { 0x039e, false, NULL }, /* swzsurf */
775 { 0x0397, false, NULL }, /* rankine (nv30) */
776 { 0x0497, false, NULL }, /* rankine (nv35) */
777 { 0x0697, false, NULL }, /* rankine (nv34) */
778 {}
779};
780
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 000000000000..ca1d27107a8e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,62 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv40_fb_init(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t fb_bar_size, tmp;
11 int num_tiles;
12 int i;
13
14 /* This is strictly an NV4x register (don't know about NV5x). */
15 /* The blob sets these to all kinds of values, and they mess up our setup. */
16 /* I got value 0x52802 instead. For some cards the blob even sets it back to 0x1. */
17 /* Note: the blob doesn't read this value, so I'm pretty sure this is safe for all cards. */
18 /* Any idea what this is? */
19 nv_wr32(dev, NV40_PFB_UNK_800, 0x1);
20
21 switch (dev_priv->chipset) {
22 case 0x40:
23 case 0x45:
24 tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
25 nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
26 num_tiles = NV10_PFB_TILE__SIZE;
27 break;
28 case 0x46: /* G72 */
29 case 0x47: /* G70 */
30 case 0x49: /* G71 */
31 case 0x4b: /* G73 */
32 case 0x4c: /* C51 (G7X version) */
33 num_tiles = NV40_PFB_TILE__SIZE_1;
34 break;
35 default:
36 num_tiles = NV40_PFB_TILE__SIZE_0;
37 break;
38 }
39
40 fb_bar_size = drm_get_resource_len(dev, 0) - 1;
41 switch (dev_priv->chipset) {
42 case 0x40:
43 for (i = 0; i < num_tiles; i++) {
44 nv_wr32(dev, NV10_PFB_TILE(i), 0);
45 nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
46 }
47 break;
48 default:
49 for (i = 0; i < num_tiles; i++) {
50 nv_wr32(dev, NV40_PFB_TILE(i), 0);
51 nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
52 }
53 break;
54 }
55
56 return 0;
57}
58
59void
60nv40_fb_takedown(struct drm_device *dev)
61{
62}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 000000000000..b4f19ccb8b41
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,314 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "nouveau_drv.h"
29#include "nouveau_drm.h"
30
31#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV40_RAMFC__SIZE))
32#define NV40_RAMFC__SIZE 128
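/* e.g. channel 3's RAMFC block starts NV40_RAMFC(3) = ramfc_offset +
 * 3 * 128 bytes into instance memory, so the nv_wi32(dev, fc + ...)
 * writes below land at fixed slots inside that 128-byte record.
 */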
33
34int
35nv40_fifo_create_context(struct nouveau_channel *chan)
36{
37 struct drm_device *dev = chan->dev;
38 struct drm_nouveau_private *dev_priv = dev->dev_private;
39 uint32_t fc = NV40_RAMFC(chan->id);
40 int ret;
41
42 ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
43 NV40_RAMFC__SIZE, NVOBJ_FLAG_ZERO_ALLOC |
44 NVOBJ_FLAG_ZERO_FREE, NULL, &chan->ramfc);
45 if (ret)
46 return ret;
47
48 dev_priv->engine.instmem.prepare_access(dev, true);
49 nv_wi32(dev, fc + 0, chan->pushbuf_base);
50 nv_wi32(dev, fc + 4, chan->pushbuf_base);
51 nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
52 nv_wi32(dev, fc + 24, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
53 NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
54 NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
55#ifdef __BIG_ENDIAN
56 NV_PFIFO_CACHE1_BIG_ENDIAN |
57#endif
58 0x30000000 /* no idea.. */);
59 nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
60 nv_wi32(dev, fc + 60, 0x0001FFFF);
61 dev_priv->engine.instmem.finish_access(dev);
62
63 /* enable the fifo dma operation */
64 nv_wr32(dev, NV04_PFIFO_MODE,
65 nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
66 return 0;
67}
68
69void
70nv40_fifo_destroy_context(struct nouveau_channel *chan)
71{
72 struct drm_device *dev = chan->dev;
73
74 nv_wr32(dev, NV04_PFIFO_MODE,
75 nv_rd32(dev, NV04_PFIFO_MODE) & ~(1 << chan->id));
76
77 if (chan->ramfc)
78 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
79}
80
81static void
82nv40_fifo_do_load_context(struct drm_device *dev, int chid)
83{
84 struct drm_nouveau_private *dev_priv = dev->dev_private;
85 uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
86
87 dev_priv->engine.instmem.prepare_access(dev, false);
88
89 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
90 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
91 nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
92 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, nv_ri32(dev, fc + 12));
93 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, nv_ri32(dev, fc + 16));
94 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 20));
95
96 /* No idea what 0x2058 is.. */
97 tmp = nv_ri32(dev, fc + 24);
98 tmp2 = nv_rd32(dev, 0x2058) & 0xFFF;
99 tmp2 |= (tmp & 0x30000000);
100 nv_wr32(dev, 0x2058, tmp2);
101 tmp &= ~0x30000000;
102 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, tmp);
103
104 nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 28));
105 nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 32));
106 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE, nv_ri32(dev, fc + 36));
107 tmp = nv_ri32(dev, fc + 40);
108 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, tmp);
109 nv_wr32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT, nv_ri32(dev, fc + 44));
110 nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, nv_ri32(dev, fc + 48));
111 nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 52));
112 nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, nv_ri32(dev, fc + 56));
113
114 /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
115 tmp = nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
116 tmp |= nv_ri32(dev, fc + 60) & 0x1FFFF;
117 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, tmp);
118
119 nv_wr32(dev, 0x32e4, nv_ri32(dev, fc + 64));
120 /* NVIDIA does this next line twice... */
121 nv_wr32(dev, 0x32e8, nv_ri32(dev, fc + 68));
122 nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
123 nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
124
125 dev_priv->engine.instmem.finish_access(dev);
126
127 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
128 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
129}
130
131int
132nv40_fifo_load_context(struct nouveau_channel *chan)
133{
134 struct drm_device *dev = chan->dev;
135 uint32_t tmp;
136
137 nv40_fifo_do_load_context(dev, chan->id);
138
139 /* Set channel active, and in DMA mode */
140 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
141 NV40_PFIFO_CACHE1_PUSH1_DMA | chan->id);
142 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
143
144 /* Reset DMA_CTL_AT_INFO to INVALID */
145 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
146 nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
147
148 return 0;
149}
150
151int
152nv40_fifo_unload_context(struct drm_device *dev)
153{
154 struct drm_nouveau_private *dev_priv = dev->dev_private;
155 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
156 uint32_t fc, tmp;
157 int chid;
158
159 chid = pfifo->channel_id(dev);
160 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
161 return 0;
162 fc = NV40_RAMFC(chid);
163
164 dev_priv->engine.instmem.prepare_access(dev, true);
165 nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
166 nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
167 nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
168 nv_wi32(dev, fc + 12, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE));
169 nv_wi32(dev, fc + 16, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT));
170 nv_wi32(dev, fc + 20, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
171 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH);
172 tmp |= nv_rd32(dev, 0x2058) & 0x30000000;
173 nv_wi32(dev, fc + 24, tmp);
174 nv_wi32(dev, fc + 28, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
175 nv_wi32(dev, fc + 32, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
176 nv_wi32(dev, fc + 36, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
177 tmp = nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
178 nv_wi32(dev, fc + 40, tmp);
179 nv_wi32(dev, fc + 44, nv_rd32(dev, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
180 nv_wi32(dev, fc + 48, nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE));
181 /* NVIDIA reads 0x3228 first, then writes DMA_GET here... maybe something
182 * more involved depending on the value of 0x3228?
183 */
184 nv_wi32(dev, fc + 52, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
185 nv_wi32(dev, fc + 56, nv_rd32(dev, NV40_PFIFO_GRCTX_INSTANCE));
186 nv_wi32(dev, fc + 60, nv_rd32(dev, NV04_PFIFO_DMA_TIMESLICE) & 0x1ffff);
187 /* No idea what the below is for exactly, ripped from a mmio-trace */
188 nv_wi32(dev, fc + 64, nv_rd32(dev, NV40_PFIFO_UNK32E4));
189 /* NVIDIA does this next line twice... bug? */
190 nv_wi32(dev, fc + 68, nv_rd32(dev, 0x32e8));
191 nv_wi32(dev, fc + 76, nv_rd32(dev, 0x2088));
192 nv_wi32(dev, fc + 80, nv_rd32(dev, 0x3300));
193#if 0 /* no real idea which is PUT/GET in UNK_48.. */
194 tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_GET);
195 tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
196 nv_wi32(dev, fc + 72, tmp);
197#endif
198 dev_priv->engine.instmem.finish_access(dev);
199
200 nv40_fifo_do_load_context(dev, pfifo->channels - 1);
201 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
202 NV40_PFIFO_CACHE1_PUSH1_DMA | (pfifo->channels - 1));
203 return 0;
204}
205
206static void
207nv40_fifo_init_reset(struct drm_device *dev)
208{
209 int i;
210
211 nv_wr32(dev, NV03_PMC_ENABLE,
212 nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
213 nv_wr32(dev, NV03_PMC_ENABLE,
214 nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
215
216 nv_wr32(dev, 0x003224, 0x000f0078);
217 nv_wr32(dev, 0x003210, 0x00000000);
218 nv_wr32(dev, 0x003270, 0x00000000);
219 nv_wr32(dev, 0x003240, 0x00000000);
220 nv_wr32(dev, 0x003244, 0x00000000);
221 nv_wr32(dev, 0x003258, 0x00000000);
222 nv_wr32(dev, 0x002504, 0x00000000);
223 for (i = 0; i < 16; i++)
224 nv_wr32(dev, 0x002510 + (i * 4), 0x00000000);
225 nv_wr32(dev, 0x00250c, 0x0000ffff);
226 nv_wr32(dev, 0x002048, 0x00000000);
227 nv_wr32(dev, 0x003228, 0x00000000);
228 nv_wr32(dev, 0x0032e8, 0x00000000);
229 nv_wr32(dev, 0x002410, 0x00000000);
230 nv_wr32(dev, 0x002420, 0x00000000);
231 nv_wr32(dev, 0x002058, 0x00000001);
232 nv_wr32(dev, 0x00221c, 0x00000000);
233 /* something with 0x2084, read/modify/write, no change */
234 nv_wr32(dev, 0x002040, 0x000000ff);
235 nv_wr32(dev, 0x002500, 0x00000000);
236 nv_wr32(dev, 0x003200, 0x00000000);
237
238 nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
239}
240
241static void
242nv40_fifo_init_ramxx(struct drm_device *dev)
243{
244 struct drm_nouveau_private *dev_priv = dev->dev_private;
245
246 nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
247 ((dev_priv->ramht_bits - 9) << 16) |
248 (dev_priv->ramht_offset >> 8));
249 nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
250
251 switch (dev_priv->chipset) {
252 case 0x47:
253 case 0x49:
254 case 0x4b:
255 nv_wr32(dev, 0x2230, 1);
256 break;
257 default:
258 break;
259 }
260
261 switch (dev_priv->chipset) {
262 case 0x40:
263 case 0x41:
264 case 0x42:
265 case 0x43:
266 case 0x45:
267 case 0x47:
268 case 0x48:
269 case 0x49:
270 case 0x4b:
271 nv_wr32(dev, NV40_PFIFO_RAMFC, 0x30002);
272 break;
273 default:
274 nv_wr32(dev, 0x2230, 0);
275 nv_wr32(dev, NV40_PFIFO_RAMFC,
276 ((nouveau_mem_fb_amount(dev) - 512 * 1024 +
277 dev_priv->ramfc_offset) >> 16) | (3 << 16));
278 break;
279 }
280}
281
282static void
283nv40_fifo_init_intr(struct drm_device *dev)
284{
285 nv_wr32(dev, 0x002100, 0xffffffff);
286 nv_wr32(dev, 0x002140, 0xffffffff);
287}
288
289int
290nv40_fifo_init(struct drm_device *dev)
291{
292 struct drm_nouveau_private *dev_priv = dev->dev_private;
293 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
294 int i;
295
296 nv40_fifo_init_reset(dev);
297 nv40_fifo_init_ramxx(dev);
298
299 nv40_fifo_do_load_context(dev, pfifo->channels - 1);
300 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
301
302 nv40_fifo_init_intr(dev);
303 pfifo->enable(dev);
304 pfifo->reassign(dev, true);
305
306 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
307 if (dev_priv->fifos[i]) {
308 uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
309 nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
310 }
311 }
312
313 return 0;
314}
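/* NV04_PFIFO_MODE is treated throughout this file as a per-channel
 * bitfield: bit i selects DMA mode for channel i, which is why
 * create/destroy set and clear (1 << chan->id) and the loop above
 * re-enables the bit for every already-allocated channel.
 */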
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 000000000000..d3e0a2a6acf8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,560 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include <linux/firmware.h>
28
29#include "drmP.h"
30#include "drm.h"
31#include "nouveau_drv.h"
32
33MODULE_FIRMWARE("nouveau/nv40.ctxprog");
34MODULE_FIRMWARE("nouveau/nv40.ctxvals");
35MODULE_FIRMWARE("nouveau/nv41.ctxprog");
36MODULE_FIRMWARE("nouveau/nv41.ctxvals");
37MODULE_FIRMWARE("nouveau/nv42.ctxprog");
38MODULE_FIRMWARE("nouveau/nv42.ctxvals");
39MODULE_FIRMWARE("nouveau/nv43.ctxprog");
40MODULE_FIRMWARE("nouveau/nv43.ctxvals");
41MODULE_FIRMWARE("nouveau/nv44.ctxprog");
42MODULE_FIRMWARE("nouveau/nv44.ctxvals");
43MODULE_FIRMWARE("nouveau/nv46.ctxprog");
44MODULE_FIRMWARE("nouveau/nv46.ctxvals");
45MODULE_FIRMWARE("nouveau/nv47.ctxprog");
46MODULE_FIRMWARE("nouveau/nv47.ctxvals");
47MODULE_FIRMWARE("nouveau/nv49.ctxprog");
48MODULE_FIRMWARE("nouveau/nv49.ctxvals");
49MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
50MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
51MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
52MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
53MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
54MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
55MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
56MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
57
58struct nouveau_channel *
59nv40_graph_channel(struct drm_device *dev)
60{
61 struct drm_nouveau_private *dev_priv = dev->dev_private;
62 uint32_t inst;
63 int i;
64
65 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
66 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
67 return NULL;
68 inst = (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) << 4;
69
70 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
71 struct nouveau_channel *chan = dev_priv->fifos[i];
72
73 if (chan && chan->ramin_grctx &&
74 chan->ramin_grctx->instance == inst)
75 return chan;
76 }
77
78 return NULL;
79}
80
81int
82nv40_graph_create_context(struct nouveau_channel *chan)
83{
84 struct drm_device *dev = chan->dev;
85 struct drm_nouveau_private *dev_priv = dev->dev_private;
86 struct nouveau_gpuobj *ctx;
87 int ret;
88
89 /* Allocate a 175KiB block of PRAMIN to store the context. This
90 * is massive overkill for a lot of chipsets, but it should be safe
91 * until we're able to implement this properly (will happen at more
92 * or less the same time we're able to write our own context programs).
93 */
94 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
95 NVOBJ_FLAG_ZERO_ALLOC,
96 &chan->ramin_grctx);
97 if (ret)
98 return ret;
99 ctx = chan->ramin_grctx->gpuobj;
100
101 /* Initialise default context values */
102 dev_priv->engine.instmem.prepare_access(dev, true);
103 nv40_grctx_vals_load(dev, ctx);
104 nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
105 dev_priv->engine.instmem.finish_access(dev);
106
107 return 0;
108}
109
110void
111nv40_graph_destroy_context(struct nouveau_channel *chan)
112{
113 nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
114}
115
116static int
117nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
118{
119 uint32_t old_cp, tv = 1000, tmp;
120 int i;
121
122 old_cp = nv_rd32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER);
123 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
124
125 tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0310);
126 tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
127 NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
128 nv_wr32(dev, NV40_PGRAPH_CTXCTL_0310, tmp);
129
130 tmp = nv_rd32(dev, NV40_PGRAPH_CTXCTL_0304);
131 tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
132 nv_wr32(dev, NV40_PGRAPH_CTXCTL_0304, tmp);
133
134 nouveau_wait_for_idle(dev);
135
136 for (i = 0; i < tv; i++) {
137 if (nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C) == 0)
138 break;
139 }
140
141 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
142
143 if (i == tv) {
144 uint32_t ucstat = nv_rd32(dev, NV40_PGRAPH_CTXCTL_UCODE_STAT);
145 NV_ERROR(dev, "Failed: Instance=0x%08x Save=%d\n", inst, save);
146 NV_ERROR(dev, "IP: 0x%02x, Opcode: 0x%08x\n",
147 ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
148 ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
149 NV_ERROR(dev, "0x40030C = 0x%08x\n",
150 nv_rd32(dev, NV40_PGRAPH_CTXCTL_030C));
151 return -EBUSY;
152 }
153
154 return 0;
155}
156
157/* Restore the context for a specific channel into PGRAPH */
158int
159nv40_graph_load_context(struct nouveau_channel *chan)
160{
161 struct drm_device *dev = chan->dev;
162 uint32_t inst;
163 int ret;
164
165 if (!chan->ramin_grctx)
166 return -EINVAL;
167 inst = chan->ramin_grctx->instance >> 4;
168
169 ret = nv40_graph_transfer_context(dev, inst, 0);
170 if (ret)
171 return ret;
172
173 /* 0x40032C, no idea of its exact function. Could simply be a
174 * record of the currently active PGRAPH context. It's currently
175 * unknown what bit 24 does. The nv ddx has it set, so we will
176 * set it here too.
177 */
178 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
179 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR,
180 (inst & NV40_PGRAPH_CTXCTL_CUR_INSTANCE) |
181 NV40_PGRAPH_CTXCTL_CUR_LOADED);
182 /* 0x32E0 records the instance address of the active FIFO's PGRAPH
183 * context. If at any time this doesn't match 0x40032C, you will
184 * receive PGRAPH_INTR_CONTEXT_SWITCH.
185 */
186 nv_wr32(dev, NV40_PFIFO_GRCTX_INSTANCE, inst);
187 return 0;
188}
189
190int
191nv40_graph_unload_context(struct drm_device *dev)
192{
193 uint32_t inst;
194 int ret;
195
196 inst = nv_rd32(dev, NV40_PGRAPH_CTXCTL_CUR);
197 if (!(inst & NV40_PGRAPH_CTXCTL_CUR_LOADED))
198 return 0;
199 inst &= NV40_PGRAPH_CTXCTL_CUR_INSTANCE;
200
201 ret = nv40_graph_transfer_context(dev, inst, 1);
202
203 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, inst);
204 return ret;
205}
206
207struct nouveau_ctxprog {
208 uint32_t signature;
209 uint8_t version;
210 uint16_t length;
211 uint32_t data[];
212} __attribute__ ((packed));
213
214struct nouveau_ctxvals {
215 uint32_t signature;
216 uint8_t version;
217 uint32_t length;
218 struct {
219 uint32_t offset;
220 uint32_t value;
221 } data[];
222} __attribute__ ((packed));
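/* The signature values checked below are little-endian ASCII: 0x5043564e
 * is "NVCP" and 0x5643564e is "NVCV". The length checks match the packed
 * headers above: 4 + 1 + 2 = 7 bytes precede the u32 ctxprog words, and
 * 4 + 1 + 4 = 9 bytes precede the 8-byte ctxvals { offset, value } pairs.
 */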
223
224int
225nv40_grctx_init(struct drm_device *dev)
226{
227 struct drm_nouveau_private *dev_priv = dev->dev_private;
228 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
229 const int chipset = dev_priv->chipset;
230 const struct firmware *fw;
231 const struct nouveau_ctxprog *cp;
232 const struct nouveau_ctxvals *cv;
233 char name[32];
234 int ret, i;
235
236 pgraph->accel_blocked = true;
237
238 if (!pgraph->ctxprog) {
239 sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
240 ret = request_firmware(&fw, name, &dev->pdev->dev);
241 if (ret) {
242 NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
243 return ret;
244 }
245
246 pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
247 if (!pgraph->ctxprog) {
248 NV_ERROR(dev, "OOM copying ctxprog\n");
249 release_firmware(fw);
250 return -ENOMEM;
251 }
252 memcpy(pgraph->ctxprog, fw->data, fw->size);
253
254 cp = pgraph->ctxprog;
255 if (cp->signature != 0x5043564e || cp->version != 0 ||
256 cp->length != ((fw->size - 7) / 4)) {
257 NV_ERROR(dev, "ctxprog invalid\n");
258 release_firmware(fw);
259 nv40_grctx_fini(dev);
260 return -EINVAL;
261 }
262 release_firmware(fw);
263 }
264
265 if (!pgraph->ctxvals) {
266 sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
267 ret = request_firmware(&fw, name, &dev->pdev->dev);
268 if (ret) {
269 NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
270 nv40_grctx_fini(dev);
271 return ret;
272 }
273
274 pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
275 if (!pgraph->ctxvals) {
276 NV_ERROR(dev, "OOM copying ctxvals\n");
277 release_firmware(fw);
278 nv40_grctx_fini(dev);
279 return -ENOMEM;
280 }
281 memcpy(pgraph->ctxvals, fw->data, fw->size);
282
283 cv = (void *)pgraph->ctxvals;
284 if (cv->signature != 0x5643564e || cv->version != 0 ||
285 cv->length != ((fw->size - 9) / 8)) {
286 NV_ERROR(dev, "ctxvals invalid\n");
287 release_firmware(fw);
288 nv40_grctx_fini(dev);
289 return -EINVAL;
290 }
291 release_firmware(fw);
292 }
293
294 cp = pgraph->ctxprog;
295
296 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
297 for (i = 0; i < cp->length; i++)
298 nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp->data[i]);
299
300 pgraph->accel_blocked = false;
301 return 0;
302}
303
304void
305nv40_grctx_fini(struct drm_device *dev)
306{
307 struct drm_nouveau_private *dev_priv = dev->dev_private;
308 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
309
310 if (pgraph->ctxprog) {
311 kfree(pgraph->ctxprog);
312 pgraph->ctxprog = NULL;
313 }
314
315 if (pgraph->ctxvals) {
316 kfree(pgraph->ctxvals);
317 pgraph->ctxvals = NULL;
318 }
319}
320
321void
322nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
323{
324 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
326 struct nouveau_ctxvals *cv = pgraph->ctxvals;
327 int i;
328
329 if (!cv)
330 return;
331
332 for (i = 0; i < cv->length; i++)
333 nv_wo32(dev, ctx, cv->data[i].offset, cv->data[i].value);
334}
335
336/*
337 * G70 0x47
338 * G71 0x49
339 * NV45 0x48
340 * G72[M] 0x46
341 * G73 0x4b
342 * C51_G7X 0x4c
343 * C51 0x4e
344 */
345int
346nv40_graph_init(struct drm_device *dev)
347{
348 struct drm_nouveau_private *dev_priv =
349 (struct drm_nouveau_private *)dev->dev_private;
350 uint32_t vramsz, tmp;
351 int i, j;
352
353 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
354 ~NV_PMC_ENABLE_PGRAPH);
355 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
356 NV_PMC_ENABLE_PGRAPH);
357
358 nv40_grctx_init(dev);
359
360 /* No context present currently */
361 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
362
363 nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
364 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
365
366 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
367 nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
368 nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
369 nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
370 nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
371 nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
372
373 nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
374 nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
375
376 j = nv_rd32(dev, 0x1540) & 0xff;
377 if (j) {
378 for (i = 0; !(j & 1); j >>= 1, i++)
379 ;
380 nv_wr32(dev, 0x405000, i);
381 }
382
383 if (dev_priv->chipset == 0x40) {
384 nv_wr32(dev, 0x4009b0, 0x83280fff);
385 nv_wr32(dev, 0x4009b4, 0x000000a0);
386 } else {
387 nv_wr32(dev, 0x400820, 0x83280eff);
388 nv_wr32(dev, 0x400824, 0x000000a0);
389 }
390
391 switch (dev_priv->chipset) {
392 case 0x40:
393 case 0x45:
394 nv_wr32(dev, 0x4009b8, 0x0078e366);
395 nv_wr32(dev, 0x4009bc, 0x0000014c);
396 break;
397 case 0x41:
398 case 0x42: /* pciid also 0x00Cx */
399 /* case 0x0120: XXX (pciid) */
400 nv_wr32(dev, 0x400828, 0x007596ff);
401 nv_wr32(dev, 0x40082c, 0x00000108);
402 break;
403 case 0x43:
404 nv_wr32(dev, 0x400828, 0x0072cb77);
405 nv_wr32(dev, 0x40082c, 0x00000108);
406 break;
407 case 0x44:
408 case 0x46: /* G72 */
409 case 0x4a:
410 case 0x4c: /* G7x-based C51 */
411 case 0x4e:
412 nv_wr32(dev, 0x400860, 0);
413 nv_wr32(dev, 0x400864, 0);
414 break;
415 case 0x47: /* G70 */
416 case 0x49: /* G71 */
417 case 0x4b: /* G73 */
418 nv_wr32(dev, 0x400828, 0x07830610);
419 nv_wr32(dev, 0x40082c, 0x0000016A);
420 break;
421 default:
422 break;
423 }
424
425 nv_wr32(dev, 0x400b38, 0x2ffff800);
426 nv_wr32(dev, 0x400b3c, 0x00006000);
427
428 /* copy tile info from PFB */
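/* Different NV4x families have different tile-region counts and
 * mirror them at different PGRAPH offsets, hence the per-chipset
 * cases below.
 */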
429 switch (dev_priv->chipset) {
430 case 0x40: /* vanilla NV40 */
431 for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
432 tmp = nv_rd32(dev, NV10_PFB_TILE(i));
433 nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
434 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
435 tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
436 nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
437 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
438 tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
439 nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
440 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
441 tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
442 nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
443 nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
444 }
445 break;
446 case 0x44:
447 case 0x4a:
448 case 0x4e: /* NV44-based cores don't have 0x406900? */
449 for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
450 tmp = nv_rd32(dev, NV40_PFB_TILE(i));
451 nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
452 tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
453 nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
454 tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
455 nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
456 tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
457 nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
458 }
459 break;
460 case 0x46:
461 case 0x47:
462 case 0x49:
463 case 0x4b: /* G7X-based cores */
464 for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
465 tmp = nv_rd32(dev, NV40_PFB_TILE(i));
466 nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
467 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
468 tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
469 nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
470 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
471 tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
472 nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
473 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
474 tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
475 nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
476 nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
477 }
478 break;
479 default: /* everything else */
480 for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
481 tmp = nv_rd32(dev, NV40_PFB_TILE(i));
482 nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
483 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
484 tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
485 nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
486 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
487 tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
488 nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
489 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
490 tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
491 nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
492 nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
493 }
494 break;
495 }
496
497 /* begin RAM config */
498 vramsz = drm_get_resource_len(dev, 0) - 1;
499 switch (dev_priv->chipset) {
500 case 0x40:
501 nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
502 nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
503 nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
504 nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
505 nv_wr32(dev, 0x400820, 0);
506 nv_wr32(dev, 0x400824, 0);
507 nv_wr32(dev, 0x400864, vramsz);
508 nv_wr32(dev, 0x400868, vramsz);
509 break;
510 default:
511 switch (dev_priv->chipset) {
512 case 0x46:
513 case 0x47:
514 case 0x49:
515 case 0x4b:
516 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
517 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
518 break;
519 default:
520 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
521 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
522 break;
523 }
524 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
525 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
526 nv_wr32(dev, 0x400840, 0);
527 nv_wr32(dev, 0x400844, 0);
528 nv_wr32(dev, 0x4008A0, vramsz);
529 nv_wr32(dev, 0x4008A4, vramsz);
530 break;
531 }
532
533 return 0;
534}
535
536void nv40_graph_takedown(struct drm_device *dev)
537{
538}
539
540struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
541 { 0x0030, false, NULL }, /* null */
542 { 0x0039, false, NULL }, /* m2mf */
543 { 0x004a, false, NULL }, /* gdirect */
544 { 0x009f, false, NULL }, /* imageblit (nv12) */
545 { 0x008a, false, NULL }, /* ifc */
546 { 0x0089, false, NULL }, /* sifm */
547 { 0x3089, false, NULL }, /* sifm (nv40) */
548 { 0x0062, false, NULL }, /* surf2d */
549 { 0x3062, false, NULL }, /* surf2d (nv40) */
550 { 0x0043, false, NULL }, /* rop */
551 { 0x0012, false, NULL }, /* beta1 */
552 { 0x0072, false, NULL }, /* beta4 */
553 { 0x0019, false, NULL }, /* cliprect */
554 { 0x0044, false, NULL }, /* pattern */
555 { 0x309e, false, NULL }, /* swzsurf */
556 { 0x4097, false, NULL }, /* curie (nv40) */
557 { 0x4497, false, NULL }, /* curie (nv44) */
558 {}
559};
560
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 000000000000..2a3495e848e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
1#include "drmP.h"
2#include "drm.h"
3#include "nouveau_drv.h"
4#include "nouveau_drm.h"
5
6int
7nv40_mc_init(struct drm_device *dev)
8{
9 struct drm_nouveau_private *dev_priv = dev->dev_private;
10 uint32_t tmp;
11
12 /* Power up everything, resetting each individual unit will
13 * be done later if needed.
14 */
15 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
16
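/* NV44-family IGPs appear to want a PFB scratch value mirrored
 * into these PMC registers.
 */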
17 switch (dev_priv->chipset) {
18 case 0x44:
19 case 0x46: /* G72 */
20 case 0x4e:
21 case 0x4c: /* C51_G7X */
22 tmp = nv_rd32(dev, NV40_PFB_020C);
23 nv_wr32(dev, NV40_PMC_1700, tmp);
24 nv_wr32(dev, NV40_PMC_1704, 0);
25 nv_wr32(dev, NV40_PMC_1708, 0);
26 nv_wr32(dev, NV40_PMC_170C, tmp);
27 break;
28 default:
29 break;
30 }
31
32 return 0;
33}
34
35void
36nv40_mc_takedown(struct drm_device *dev)
37{
38}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
new file mode 100644
index 000000000000..f8e28a1e44e7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -0,0 +1,769 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_mode.h"
29#include "drm_crtc_helper.h"
30
31#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
32#include "nouveau_reg.h"
33#include "nouveau_drv.h"
34#include "nouveau_hw.h"
35#include "nouveau_encoder.h"
36#include "nouveau_crtc.h"
37#include "nouveau_fb.h"
38#include "nouveau_connector.h"
39#include "nv50_display.h"
40
41static void
42nv50_crtc_lut_load(struct drm_crtc *crtc)
43{
44 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
45 void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
46 int i;
47
48 NV_DEBUG(crtc->dev, "\n");
49
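/* Each hardware LUT entry is 8 bytes: 16-bit R, G and B components
 * plus padding; drop the two low bits of the 16-bit gamma values to
 * fit the hardware's 14-bit range.
 */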
50 for (i = 0; i < 256; i++) {
51 writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
52 writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
53 writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
54 }
55
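/* 30-bit depth seems to want a 257th LUT entry; replicate the
 * last one.
 */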
56 if (nv_crtc->lut.depth == 30) {
57 writew(nv_crtc->lut.r[i - 1] >> 2, lut + 8*i + 0);
58 writew(nv_crtc->lut.g[i - 1] >> 2, lut + 8*i + 2);
59 writew(nv_crtc->lut.b[i - 1] >> 2, lut + 8*i + 4);
60 }
61}
62
63int
64nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
65{
66 struct drm_device *dev = nv_crtc->base.dev;
67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_channel *evo = dev_priv->evo;
69 int index = nv_crtc->index, ret;
70
71 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
72 NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
73
74 if (blanked) {
75 nv_crtc->cursor.hide(nv_crtc, false);
76
77 ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 7 : 5);
78 if (ret) {
79 NV_ERROR(dev, "no space while blanking crtc\n");
80 return ret;
81 }
82 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
83 OUT_RING(evo, NV50_EVO_CRTC_CLUT_MODE_BLANK);
84 OUT_RING(evo, 0);
85 if (dev_priv->chipset != 0x50) {
86 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
87 OUT_RING(evo, NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE);
88 }
89
90 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
91 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
92 } else {
93 if (nv_crtc->cursor.visible)
94 nv_crtc->cursor.show(nv_crtc, false);
95 else
96 nv_crtc->cursor.hide(nv_crtc, false);
97
98 ret = RING_SPACE(evo, dev_priv->chipset != 0x50 ? 10 : 8);
99 if (ret) {
100 NV_ERROR(dev, "no space while unblanking crtc\n");
101 return ret;
102 }
103 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, CLUT_MODE), 2);
104 OUT_RING(evo, nv_crtc->lut.depth == 8 ?
105 NV50_EVO_CRTC_CLUT_MODE_OFF :
106 NV50_EVO_CRTC_CLUT_MODE_ON);
107 OUT_RING(evo, (nv_crtc->lut.nvbo->bo.mem.mm_node->start <<
108 PAGE_SHIFT) >> 8);
109 if (dev_priv->chipset != 0x50) {
110 BEGIN_RING(evo, 0, NV84_EVO_CRTC(index, CLUT_DMA), 1);
111 OUT_RING(evo, NvEvoVRAM);
112 }
113
114 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_OFFSET), 2);
115 OUT_RING(evo, nv_crtc->fb.offset >> 8);
116 OUT_RING(evo, 0);
117 BEGIN_RING(evo, 0, NV50_EVO_CRTC(index, FB_DMA), 1);
118 if (dev_priv->chipset != 0x50) {
119 if (nv_crtc->fb.tile_flags == 0x7a00)
120 OUT_RING(evo, NvEvoFB32);
121 else if (nv_crtc->fb.tile_flags == 0x7000)
122 OUT_RING(evo, NvEvoFB16);
123 else
124 OUT_RING(evo, NvEvoVRAM);
125 } else {
126 OUT_RING(evo, NvEvoVRAM);
127 }
128 }
129
130 nv_crtc->fb.blanked = blanked;
131 return 0;
132}
133
134static int
135nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
136{
137 struct drm_device *dev = nv_crtc->base.dev;
138 struct drm_nouveau_private *dev_priv = dev->dev_private;
139 struct nouveau_channel *evo = dev_priv->evo;
140 int ret;
141
142 NV_DEBUG(dev, "\n");
143
144 ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
145 if (ret) {
146 NV_ERROR(dev, "no space while setting dither\n");
147 return ret;
148 }
149
150 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
151 if (on)
152 OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
153 else
154 OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
155
156 if (update) {
157 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
158 OUT_RING(evo, 0);
159 FIRE_RING(evo);
160 }
161
162 return 0;
163}
164
165struct nouveau_connector *
166nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
167{
168 struct drm_device *dev = nv_crtc->base.dev;
169 struct drm_connector *connector;
170 struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
171
172 /* The safest approach is to find an encoder with the right crtc that
173 * is also linked to a connector. */
174 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
175 if (connector->encoder)
176 if (connector->encoder->crtc == crtc)
177 return nouveau_connector(connector);
178 }
179
180 return NULL;
181}
182
183static int
184nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
185{
186 struct nouveau_connector *nv_connector =
187 nouveau_crtc_connector_get(nv_crtc);
188 struct drm_device *dev = nv_crtc->base.dev;
189 struct drm_nouveau_private *dev_priv = dev->dev_private;
190 struct nouveau_channel *evo = dev_priv->evo;
191 struct drm_display_mode *native_mode = NULL;
192 struct drm_display_mode *mode = &nv_crtc->base.mode;
193 uint32_t outX, outY, horiz, vert;
194 int ret;
195
196 NV_DEBUG(dev, "\n");
197
198 switch (scaling_mode) {
199 case DRM_MODE_SCALE_NONE:
200 break;
201 default:
202 if (!nv_connector || !nv_connector->native_mode) {
203 NV_ERROR(dev, "No native mode, forcing panel scaling\n");
204 scaling_mode = DRM_MODE_SCALE_NONE;
205 } else {
206 native_mode = nv_connector->native_mode;
207 }
208 break;
209 }
210
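/* Scale factors are computed in .19 fixed point; for aspect scaling
 * the smaller factor is applied to both axes so the image fits.
 */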
211 switch (scaling_mode) {
212 case DRM_MODE_SCALE_ASPECT:
213 horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
214 vert = (native_mode->vdisplay << 19) / mode->vdisplay;
215
216 if (vert > horiz) {
217 outX = (mode->hdisplay * horiz) >> 19;
218 outY = (mode->vdisplay * horiz) >> 19;
219 } else {
220 outX = (mode->hdisplay * vert) >> 19;
221 outY = (mode->vdisplay * vert) >> 19;
222 }
223 break;
224 case DRM_MODE_SCALE_FULLSCREEN:
225 outX = native_mode->hdisplay;
226 outY = native_mode->vdisplay;
227 break;
228 case DRM_MODE_SCALE_CENTER:
229 case DRM_MODE_SCALE_NONE:
230 default:
231 outX = mode->hdisplay;
232 outY = mode->vdisplay;
233 break;
234 }
235
236 ret = RING_SPACE(evo, update ? 7 : 5);
237 if (ret)
238 return ret;
239
240 /* Anyone got a better name for SCALER_ACTIVE? */
241 /* One day I'll have to figure out why this is really needed. */
242 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
243 if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
244 (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
245 mode->hdisplay != outX || mode->vdisplay != outY) {
246 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
247 } else {
248 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
249 }
250
251 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
252 OUT_RING(evo, outY << 16 | outX);
253 OUT_RING(evo, outY << 16 | outX);
254
255 if (update) {
256 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
257 OUT_RING(evo, 0);
258 FIRE_RING(evo);
259 }
260
261 return 0;
262}
263
264int
265nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
266{
267 uint32_t pll_reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
268 struct nouveau_pll_vals pll;
269 struct pll_lims limits;
270 uint32_t reg1, reg2;
271 int ret;
272
273 ret = get_pll_limits(dev, pll_reg, &limits);
274 if (ret)
275 return ret;
276
277 ret = nouveau_calc_pll_mnp(dev, &limits, pclk, &pll);
278 if (ret <= 0)
279 return ret;
280
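/* Two-stage PLLs (vco2 present) take M/N pairs for both stages plus
 * log2P in the second register; single-stage PLLs pack log2P, M and
 * N into a single register.
 */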
281 if (limits.vco2.maxfreq) {
282 reg1 = nv_rd32(dev, pll_reg + 4) & 0xff00ff00;
283 reg2 = nv_rd32(dev, pll_reg + 8) & 0x8000ff00;
284 nv_wr32(dev, pll_reg, 0x10000611);
285 nv_wr32(dev, pll_reg + 4, reg1 | (pll.M1 << 16) | pll.N1);
286 nv_wr32(dev, pll_reg + 8,
287 reg2 | (pll.log2P << 28) | (pll.M2 << 16) | pll.N2);
288 } else {
289 reg1 = nv_rd32(dev, pll_reg + 4) & 0xffc00000;
290 nv_wr32(dev, pll_reg, 0x50000610);
291 nv_wr32(dev, pll_reg + 4, reg1 |
292 (pll.log2P << 16) | (pll.M1 << 8) | pll.N1);
293 }
294
295 return 0;
296}
297
298static void
299nv50_crtc_destroy(struct drm_crtc *crtc)
300{
301 struct nouveau_crtc *nv_crtc;
302
303 if (!crtc)
304 return;
305 nv_crtc = nouveau_crtc(crtc);
306
307 NV_DEBUG(crtc->dev, "\n");
308
309 drm_crtc_cleanup(&nv_crtc->base);
310
311 nv50_cursor_fini(nv_crtc);
312
313 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
314 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
315 kfree(nv_crtc->mode);
316 kfree(nv_crtc);
317}
318
319int
320nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
321 uint32_t buffer_handle, uint32_t width, uint32_t height)
322{
323 struct drm_device *dev = crtc->dev;
324 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
326 struct nouveau_bo *cursor = NULL;
327 struct drm_gem_object *gem;
328 int ret = 0, i;
329
330 if (width != 64 || height != 64)
331 return -EINVAL;
332
333 if (!buffer_handle) {
334 nv_crtc->cursor.hide(nv_crtc, true);
335 return 0;
336 }
337
338 gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
339 if (!gem)
340 return -EINVAL;
341 cursor = nouveau_gem_object(gem);
342
343 ret = nouveau_bo_map(cursor);
344 if (ret)
345 goto out;
346
347 /* The simple approach will do for now: copy the image word by word. */
348 for (i = 0; i < 64 * 64; i++)
349 nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, nouveau_bo_rd32(cursor, i));
350
351 nouveau_bo_unmap(cursor);
352
353 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset -
354 dev_priv->vm_vram_base);
355 nv_crtc->cursor.show(nv_crtc, true);
356
357out:
358 mutex_lock(&dev->struct_mutex);
359 drm_gem_object_unreference(gem);
360 mutex_unlock(&dev->struct_mutex);
361 return ret;
362}
363
364int
365nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
366{
367 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
368
369 nv_crtc->cursor.set_pos(nv_crtc, x, y);
370 return 0;
371}
372
373static void
374nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
375 uint32_t size)
376{
377 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
378 int i;
379
380 if (size != 256)
381 return;
382
383 for (i = 0; i < 256; i++) {
384 nv_crtc->lut.r[i] = r[i];
385 nv_crtc->lut.g[i] = g[i];
386 nv_crtc->lut.b[i] = b[i];
387 }
388
389 /* We need to know the depth before we upload, but it's possible to
390 * get called before a framebuffer is bound. If this is the case,
391 * mark the lut values as dirty by setting depth==0, and it'll be
392 * uploaded on the first mode_set_base()
393 */
394 if (!nv_crtc->base.fb) {
395 nv_crtc->lut.depth = 0;
396 return;
397 }
398
399 nv50_crtc_lut_load(crtc);
400}
401
402static void
403nv50_crtc_save(struct drm_crtc *crtc)
404{
405 NV_ERROR(crtc->dev, "!!\n");
406}
407
408static void
409nv50_crtc_restore(struct drm_crtc *crtc)
410{
411 NV_ERROR(crtc->dev, "!!\n");
412}
413
414static const struct drm_crtc_funcs nv50_crtc_funcs = {
415 .save = nv50_crtc_save,
416 .restore = nv50_crtc_restore,
417 .cursor_set = nv50_crtc_cursor_set,
418 .cursor_move = nv50_crtc_cursor_move,
419 .gamma_set = nv50_crtc_gamma_set,
420 .set_config = drm_crtc_helper_set_config,
421 .destroy = nv50_crtc_destroy,
422};
423
424static void
425nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
426{
427}
428
429static void
430nv50_crtc_prepare(struct drm_crtc *crtc)
431{
432 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
433 struct drm_device *dev = crtc->dev;
434 struct drm_encoder *encoder;
435
436 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
437
438 /* Disconnect all unused encoders. */
439 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
440 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
441
442 if (drm_helper_encoder_in_use(encoder))
443 continue;
444
445 nv_encoder->disconnect(nv_encoder);
446 }
447
448 nv50_crtc_blank(nv_crtc, true);
449}
450
451static void
452nv50_crtc_commit(struct drm_crtc *crtc)
453{
454 struct drm_crtc *crtc2;
455 struct drm_device *dev = crtc->dev;
456 struct drm_nouveau_private *dev_priv = dev->dev_private;
457 struct nouveau_channel *evo = dev_priv->evo;
458 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
459 int ret;
460
461 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
462
463 nv50_crtc_blank(nv_crtc, false);
464
465 /* Explicitly blank all unused crtc's. */
466 list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
467 if (!drm_helper_crtc_in_use(crtc2))
468 nv50_crtc_blank(nouveau_crtc(crtc2), true);
469 }
470
471 ret = RING_SPACE(evo, 2);
472 if (ret) {
473 NV_ERROR(dev, "no space while committing crtc\n");
474 return;
475 }
476 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
477 OUT_RING(evo, 0);
478 FIRE_RING(evo);
479}
480
481static bool
482nv50_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
483 struct drm_display_mode *adjusted_mode)
484{
485 return true;
486}
487
488static int
489nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
490 struct drm_framebuffer *old_fb, bool update)
491{
492 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
493 struct drm_device *dev = nv_crtc->base.dev;
494 struct drm_nouveau_private *dev_priv = dev->dev_private;
495 struct nouveau_channel *evo = dev_priv->evo;
496 struct drm_framebuffer *drm_fb = nv_crtc->base.fb;
497 struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
498 int ret, format;
499
500 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
501
502 switch (drm_fb->depth) {
503 case 8:
504 format = NV50_EVO_CRTC_FB_DEPTH_8;
505 break;
506 case 15:
507 format = NV50_EVO_CRTC_FB_DEPTH_15;
508 break;
509 case 16:
510 format = NV50_EVO_CRTC_FB_DEPTH_16;
511 break;
512 case 24:
513 case 32:
514 format = NV50_EVO_CRTC_FB_DEPTH_24;
515 break;
516 case 30:
517 format = NV50_EVO_CRTC_FB_DEPTH_30;
518 break;
519 default:
520 NV_ERROR(dev, "unknown depth %d\n", drm_fb->depth);
521 return -EINVAL;
522 }
523
524 ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
525 if (ret)
526 return ret;
527
528 if (old_fb) {
529 struct nouveau_framebuffer *ofb = nouveau_framebuffer(old_fb);
530 nouveau_bo_unpin(ofb->nvbo);
531 }
532
533 nv_crtc->fb.offset = fb->nvbo->bo.offset - dev_priv->vm_vram_base;
534 nv_crtc->fb.tile_flags = fb->nvbo->tile_flags;
535 nv_crtc->fb.cpp = drm_fb->bits_per_pixel / 8;
536 if (!nv_crtc->fb.blanked && dev_priv->chipset != 0x50) {
537 ret = RING_SPACE(evo, 2);
538 if (ret)
539 return ret;
540
541 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_DMA), 1);
542 if (nv_crtc->fb.tile_flags == 0x7a00)
543 OUT_RING(evo, NvEvoFB32);
544 else if (nv_crtc->fb.tile_flags == 0x7000)
545 OUT_RING(evo, NvEvoFB16);
546 else
547 OUT_RING(evo, NvEvoVRAM);
549 }
550
551 ret = RING_SPACE(evo, 12);
552 if (ret)
553 return ret;
554
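/* EVO takes VRAM addresses in 256-byte units, hence the >> 8
 * shifts below.
 */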
555 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_OFFSET), 5);
556 OUT_RING(evo, nv_crtc->fb.offset >> 8);
557 OUT_RING(evo, 0);
558 OUT_RING(evo, (drm_fb->height << 16) | drm_fb->width);
559 if (!nv_crtc->fb.tile_flags) {
560 OUT_RING(evo, drm_fb->pitch | (1 << 20));
561 } else {
562 OUT_RING(evo, ((drm_fb->pitch / 4) << 4) |
563 fb->nvbo->tile_mode);
564 }
565 if (dev_priv->chipset == 0x50)
566 OUT_RING(evo, (fb->nvbo->tile_flags << 8) | format);
567 else
568 OUT_RING(evo, format);
569
570 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLUT_MODE), 1);
571 OUT_RING(evo, fb->base.depth == 8 ?
572 NV50_EVO_CRTC_CLUT_MODE_OFF : NV50_EVO_CRTC_CLUT_MODE_ON);
573
574 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, COLOR_CTRL), 1);
575 OUT_RING(evo, NV50_EVO_CRTC_COLOR_CTRL_COLOR);
576 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, FB_POS), 1);
577 OUT_RING(evo, (y << 16) | x);
578
579 if (nv_crtc->lut.depth != fb->base.depth) {
580 nv_crtc->lut.depth = fb->base.depth;
581 nv50_crtc_lut_load(crtc);
582 }
583
584 if (update) {
585 ret = RING_SPACE(evo, 2);
586 if (ret)
587 return ret;
588 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
589 OUT_RING(evo, 0);
590 FIRE_RING(evo);
591 }
592
593 return 0;
594}
595
596static int
597nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
598 struct drm_display_mode *adjusted_mode, int x, int y,
599 struct drm_framebuffer *old_fb)
600{
601 struct drm_device *dev = crtc->dev;
602 struct drm_nouveau_private *dev_priv = dev->dev_private;
603 struct nouveau_channel *evo = dev_priv->evo;
604 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
605 struct nouveau_connector *nv_connector = NULL;
606 uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
607 uint32_t hunk1, vunk1, vunk2a, vunk2b;
608 int ret;
609
610 /* Find the connector attached to this CRTC */
611 nv_connector = nouveau_crtc_connector_get(nv_crtc);
612
613 *nv_crtc->mode = *adjusted_mode;
614
615 NV_DEBUG(dev, "index %d\n", nv_crtc->index);
616
617 hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
618 vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
619 hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
620 vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
621 /* I can't give this a proper name; can anyone else? */
622 hunk1 = adjusted_mode->htotal -
623 adjusted_mode->hsync_start + adjusted_mode->hdisplay;
624 vunk1 = adjusted_mode->vtotal -
625 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
626 /* Another strange value, this time used only for interlaced modes. */
627 vunk2a = 2 * adjusted_mode->vtotal -
628 adjusted_mode->vsync_start + adjusted_mode->vdisplay;
629 vunk2b = adjusted_mode->vtotal -
630 adjusted_mode->vsync_start + adjusted_mode->vtotal;
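/* These look like blanking start/end positions measured from sync
 * start, but that's unconfirmed.
 */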
631
632 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
633 vsync_dur /= 2;
634 vsync_start_to_end /= 2;
635 vunk1 /= 2;
636 vunk2a /= 2;
637 vunk2b /= 2;
638 /* magic */
639 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
640 vsync_start_to_end -= 1;
641 vunk1 -= 1;
642 vunk2a -= 1;
643 vunk2b -= 1;
644 }
645 }
646
647 ret = RING_SPACE(evo, 17);
648 if (ret)
649 return ret;
650
651 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
652 OUT_RING(evo, adjusted_mode->clock | 0x800000);
653 OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
654
655 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
656 OUT_RING(evo, 0);
657 OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
658 OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
659 OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
660 (hsync_start_to_end - 1));
661 OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
662
663 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
664 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
665 OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
666 } else {
667 OUT_RING(evo, 0);
668 OUT_RING(evo, 0);
669 }
670
671 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
672 OUT_RING(evo, 0);
673
674 /* This is the actual resolution of the mode. */
675 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
676 OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
677 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
678 OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
679
680 nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
681 nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
682
683 return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, false);
684}
685
686static int
687nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
688 struct drm_framebuffer *old_fb)
689{
690 return nv50_crtc_do_mode_set_base(crtc, x, y, old_fb, true);
691}
692
693static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
694 .dpms = nv50_crtc_dpms,
695 .prepare = nv50_crtc_prepare,
696 .commit = nv50_crtc_commit,
697 .mode_fixup = nv50_crtc_mode_fixup,
698 .mode_set = nv50_crtc_mode_set,
699 .mode_set_base = nv50_crtc_mode_set_base,
700 .load_lut = nv50_crtc_lut_load,
701};
702
703int
704nv50_crtc_create(struct drm_device *dev, int index)
705{
706 struct nouveau_crtc *nv_crtc = NULL;
707 int ret, i;
708
709 NV_DEBUG(dev, "\n");
710
711 nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
712 if (!nv_crtc)
713 return -ENOMEM;
714
715 nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
716 if (!nv_crtc->mode) {
717 kfree(nv_crtc);
718 return -ENOMEM;
719 }
720
721 /* Default CLUT parameters; these are activated on the hardware
722 * upon the first mode set.
723 */
724 for (i = 0; i < 256; i++) {
725 nv_crtc->lut.r[i] = i << 8;
726 nv_crtc->lut.g[i] = i << 8;
727 nv_crtc->lut.b[i] = i << 8;
728 }
729 nv_crtc->lut.depth = 0;
730
731 ret = nouveau_bo_new(dev, NULL, 4096, 0x100, TTM_PL_FLAG_VRAM,
732 0, 0x0000, false, true, &nv_crtc->lut.nvbo);
733 if (!ret) {
734 ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
735 if (!ret)
736 ret = nouveau_bo_map(nv_crtc->lut.nvbo);
737 if (ret)
738 nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
739 }
740
741 if (ret) {
742 kfree(nv_crtc->mode);
743 kfree(nv_crtc);
744 return ret;
745 }
746
747 nv_crtc->index = index;
748
749 /* set function pointers */
750 nv_crtc->set_dither = nv50_crtc_set_dither;
751 nv_crtc->set_scale = nv50_crtc_set_scale;
752
753 drm_crtc_init(dev, &nv_crtc->base, &nv50_crtc_funcs);
754 drm_crtc_helper_add(&nv_crtc->base, &nv50_crtc_helper_funcs);
755 drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
756
757 ret = nouveau_bo_new(dev, NULL, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
758 0, 0x0000, false, true, &nv_crtc->cursor.nvbo);
759 if (!ret) {
760 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
761 if (!ret)
762 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
763 if (ret)
764 nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
765 }
766
767 nv50_cursor_init(nv_crtc);
768 return 0;
769}
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c
new file mode 100644
index 000000000000..e2e79a8f220d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_cursor.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_mode.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_crtc.h"
34#include "nv50_display.h"
35
36static void
37nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
38{
39 struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
40 struct nouveau_channel *evo = dev_priv->evo;
41 struct drm_device *dev = nv_crtc->base.dev;
42 int ret;
43
44 NV_DEBUG(dev, "\n");
45
46 if (update && nv_crtc->cursor.visible)
47 return;
48
49 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while unhiding cursor\n");
52 return;
53 }
54
55 if (dev_priv->chipset != 0x50) {
56 BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
57 OUT_RING(evo, NvEvoVRAM);
58 }
59 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
60 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_SHOW);
61 OUT_RING(evo, nv_crtc->cursor.offset >> 8);
62
63 if (update) {
64 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
65 OUT_RING(evo, 0);
66 FIRE_RING(evo);
67 nv_crtc->cursor.visible = true;
68 }
69}
70
71static void
72nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
73{
74 struct drm_nouveau_private *dev_priv = nv_crtc->base.dev->dev_private;
75 struct nouveau_channel *evo = dev_priv->evo;
76 struct drm_device *dev = nv_crtc->base.dev;
77 int ret;
78
79 NV_DEBUG(dev, "\n");
80
81 if (update && !nv_crtc->cursor.visible)
82 return;
83
84 ret = RING_SPACE(evo, (dev_priv->chipset != 0x50 ? 5 : 3) + update * 2);
85 if (ret) {
86 NV_ERROR(dev, "no space while hiding cursor\n");
87 return;
88 }
89 BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CURSOR_CTRL), 2);
90 OUT_RING(evo, NV50_EVO_CRTC_CURSOR_CTRL_HIDE);
91 OUT_RING(evo, 0);
92 if (dev_priv->chipset != 0x50) {
93 BEGIN_RING(evo, 0, NV84_EVO_CRTC(nv_crtc->index, CURSOR_DMA), 1);
94 OUT_RING(evo, NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE);
95 }
96
97 if (update) {
98 BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
99 OUT_RING(evo, 0);
100 FIRE_RING(evo);
101 nv_crtc->cursor.visible = false;
102 }
103}
104
105static void
106nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
107{
108 struct drm_device *dev = nv_crtc->base.dev;
109
110 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS(nv_crtc->index),
111 ((y & 0xFFFF) << 16) | (x & 0xFFFF));
112 /* Needed to make the cursor move. */
113 nv_wr32(dev, NV50_PDISPLAY_CURSOR_USER_POS_CTRL(nv_crtc->index), 0);
114}
115
116static void
117nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
118{
119 NV_DEBUG(nv_crtc->base.dev, "\n");
120 if (offset == nv_crtc->cursor.offset)
121 return;
122
123 nv_crtc->cursor.offset = offset;
124 if (nv_crtc->cursor.visible) {
125 nv_crtc->cursor.visible = false;
126 nv_crtc->cursor.show(nv_crtc, true);
127 }
128}
129
130int
131nv50_cursor_init(struct nouveau_crtc *nv_crtc)
132{
133 nv_crtc->cursor.set_offset = nv50_cursor_set_offset;
134 nv_crtc->cursor.set_pos = nv50_cursor_set_pos;
135 nv_crtc->cursor.hide = nv50_cursor_hide;
136 nv_crtc->cursor.show = nv50_cursor_show;
137 return 0;
138}
139
140void
141nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
142{
143 struct drm_device *dev = nv_crtc->base.dev;
144 int idx = nv_crtc->index;
145
146 NV_DEBUG(dev, "\n");
147
148 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
149 if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
150 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
151 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
152 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
153 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx)));
154 }
155}
156
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
new file mode 100644
index 000000000000..fb5838e3be24
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -0,0 +1,304 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39static void
40nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
41{
42 struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret;
46
47 NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);
48
49 ret = RING_SPACE(evo, 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while disconnecting DAC\n");
52 return;
53 }
54 BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
55 OUT_RING(evo, 0);
56}
57
58static enum drm_connector_status
59nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
60{
61 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
62 struct drm_device *dev = encoder->dev;
63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 enum drm_connector_status status = connector_status_disconnected;
65 uint32_t dpms_state, load_pattern, load_state;
66 int or = nv_encoder->or;
67
68 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(or), 0x00000001);
69 dpms_state = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or));
70
71 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or),
72 0x00150000 | NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
73 if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
74 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
75 NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
76 NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
77 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
78 return status;
79 }
80
81 /* Use the BIOS-provided value if possible. */
82 if (dev_priv->vbios->dactestval) {
83 load_pattern = dev_priv->vbios->dactestval;
84 NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
85 load_pattern);
86 } else {
87 load_pattern = 340;
88 NV_DEBUG(dev, "Using default load_pattern of %d\n",
89 load_pattern);
90 }
91
92 nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or),
93 NV50_PDISPLAY_DAC_LOAD_CTRL_ACTIVE | load_pattern);
94 mdelay(45); /* give it some time to process */
95 load_state = nv_rd32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or));
96
97 nv_wr32(dev, NV50_PDISPLAY_DAC_LOAD_CTRL(or), 0);
98 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), dpms_state |
99 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
100
101 if ((load_state & NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT) ==
102 NV50_PDISPLAY_DAC_LOAD_CTRL_PRESENT)
103 status = connector_status_connected;
104
105 if (status == connector_status_connected)
106 NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
107 else
108 NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);
109
110 return status;
111}
112
113static void
114nv50_dac_dpms(struct drm_encoder *encoder, int mode)
115{
116 struct drm_device *dev = encoder->dev;
117 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
118 uint32_t val;
119 int or = nv_encoder->or;
120
121 NV_DEBUG(dev, "or %d mode %d\n", or, mode);
122
123 /* wait for it to be done */
124 if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
125 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING, 0)) {
126 NV_ERROR(dev, "timeout: DAC_DPMS_CTRL_PENDING(%d) == 0\n", or);
127 NV_ERROR(dev, "DAC_DPMS_CTRL(%d) = 0x%08x\n", or,
128 nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)));
129 return;
130 }
131
132 val = nv_rd32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or)) & ~0x7F;
133
134 if (mode != DRM_MODE_DPMS_ON)
135 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_BLANKED;
136
137 switch (mode) {
138 case DRM_MODE_DPMS_STANDBY:
139 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
140 break;
141 case DRM_MODE_DPMS_SUSPEND:
142 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
143 break;
144 case DRM_MODE_DPMS_OFF:
145 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_OFF;
146 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_HSYNC_OFF;
147 val |= NV50_PDISPLAY_DAC_DPMS_CTRL_VSYNC_OFF;
148 break;
149 default:
150 break;
151 }
152
153 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(or), val |
154 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
155}
156
157static void
158nv50_dac_save(struct drm_encoder *encoder)
159{
160 NV_ERROR(encoder->dev, "!!\n");
161}
162
163static void
164nv50_dac_restore(struct drm_encoder *encoder)
165{
166 NV_ERROR(encoder->dev, "!!\n");
167}
168
169static bool
170nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
171 struct drm_display_mode *adjusted_mode)
172{
173 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
174 struct nouveau_connector *connector;
175
176 NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
177
178 connector = nouveau_encoder_connector_get(nv_encoder);
179 if (!connector) {
180 NV_ERROR(encoder->dev, "Encoder has no connector\n");
181 return false;
182 }
183
184 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
185 connector->native_mode) {
186 int id = adjusted_mode->base.id;
187 *adjusted_mode = *connector->native_mode;
188 adjusted_mode->base.id = id;
189 }
190
191 return true;
192}
193
194static void
195nv50_dac_prepare(struct drm_encoder *encoder)
196{
197}
198
199static void
200nv50_dac_commit(struct drm_encoder *encoder)
201{
202}
203
204static void
205nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
206 struct drm_display_mode *adjusted_mode)
207{
208 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
209 struct drm_device *dev = encoder->dev;
210 struct drm_nouveau_private *dev_priv = dev->dev_private;
211 struct nouveau_channel *evo = dev_priv->evo;
212 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
213 uint32_t mode_ctl = 0, mode_ctl2 = 0;
214 int ret;
215
216 NV_DEBUG(dev, "or %d\n", nv_encoder->or);
217
218 nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
219
220 if (crtc->index == 1)
221 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC1;
222 else
223 mode_ctl |= NV50_EVO_DAC_MODE_CTRL_CRTC0;
224
225 /* Lacking working tv-out, this is not 100% certain. */
226 if (nv_encoder->dcb->type == OUTPUT_ANALOG)
227 mode_ctl |= 0x40;
228 else
229 if (nv_encoder->dcb->type == OUTPUT_TV)
230 mode_ctl |= 0x100;
231
232 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
233 mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NHSYNC;
234
235 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
236 mode_ctl2 |= NV50_EVO_DAC_MODE_CTRL2_NVSYNC;
237
238 ret = RING_SPACE(evo, 3);
239 if (ret) {
240 NV_ERROR(dev, "no space while connecting DAC\n");
241 return;
242 }
243 BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
244 OUT_RING(evo, mode_ctl);
245 OUT_RING(evo, mode_ctl2);
246}
247
248static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
249 .dpms = nv50_dac_dpms,
250 .save = nv50_dac_save,
251 .restore = nv50_dac_restore,
252 .mode_fixup = nv50_dac_mode_fixup,
253 .prepare = nv50_dac_prepare,
254 .commit = nv50_dac_commit,
255 .mode_set = nv50_dac_mode_set,
256 .detect = nv50_dac_detect
257};
258
259static void
260nv50_dac_destroy(struct drm_encoder *encoder)
261{
262 struct nouveau_encoder *nv_encoder;
263
264 if (!encoder)
265 return;
266 nv_encoder = nouveau_encoder(encoder);
267 NV_DEBUG(encoder->dev, "\n");
268
269 drm_encoder_cleanup(encoder);
270 kfree(nv_encoder);
271}
272
273static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
274 .destroy = nv50_dac_destroy,
275};
276
277int
278nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
279{
280 struct nouveau_encoder *nv_encoder;
281 struct drm_encoder *encoder;
282
283 NV_DEBUG(dev, "\n");
284 NV_INFO(dev, "Detected a DAC output\n");
285
286 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
287 if (!nv_encoder)
288 return -ENOMEM;
289 encoder = to_drm_encoder(nv_encoder);
290
291 nv_encoder->dcb = entry;
292 nv_encoder->or = ffs(entry->or) - 1;
293
294 nv_encoder->disconnect = nv50_dac_disconnect;
295
296 drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
297 DRM_MODE_ENCODER_DAC);
298 drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
299
300 encoder->possible_crtcs = entry->heads;
301 encoder->possible_clones = 0;
302 return 0;
303}
304
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
new file mode 100644
index 000000000000..12c5ee63495b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -0,0 +1,1015 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "nv50_display.h"
28#include "nouveau_crtc.h"
29#include "nouveau_encoder.h"
30#include "nouveau_connector.h"
31#include "nouveau_fb.h"
32#include "drm_crtc_helper.h"
33
34static void
35nv50_evo_channel_del(struct nouveau_channel **pchan)
36{
37 struct nouveau_channel *chan = *pchan;
38
39 if (!chan)
40 return;
41 *pchan = NULL;
42
43 nouveau_gpuobj_channel_takedown(chan);
44 nouveau_bo_ref(NULL, &chan->pushbuf_bo);
45
46 if (chan->user)
47 iounmap(chan->user);
48
49 kfree(chan);
50}
51
52static int
53nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
54 uint32_t tile_flags, uint32_t magic_flags,
55 uint32_t offset, uint32_t limit)
56{
57 struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
58 struct drm_device *dev = evo->dev;
59 struct nouveau_gpuobj *obj = NULL;
60 int ret;
61
62 ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
63 if (ret)
64 return ret;
65 obj->engine = NVOBJ_ENGINE_DISPLAY;
66
67 ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
68 if (ret) {
69 nouveau_gpuobj_del(dev, &obj);
70 return ret;
71 }
72
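/* Lay the DMA object out by hand: class and flag bits first, then
 * the address limit and base offset, followed by fixed tail words.
 */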
73 dev_priv->engine.instmem.prepare_access(dev, true);
74 nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
75 nv_wo32(dev, obj, 1, limit);
76 nv_wo32(dev, obj, 2, offset);
77 nv_wo32(dev, obj, 3, 0x00000000);
78 nv_wo32(dev, obj, 4, 0x00000000);
79 nv_wo32(dev, obj, 5, 0x00010000);
80 dev_priv->engine.instmem.finish_access(dev);
81
82 return 0;
83}
84
85static int
86nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
87{
88 struct drm_nouveau_private *dev_priv = dev->dev_private;
89 struct nouveau_channel *chan;
90 int ret;
91
92 chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
93 if (!chan)
94 return -ENOMEM;
95 *pchan = chan;
96
97 chan->id = -1;
98 chan->dev = dev;
99 chan->user_get = 4;
100 chan->user_put = 0;
101
102 INIT_LIST_HEAD(&chan->ramht_refs);
103
104 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
105 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
106 if (ret) {
107 NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
108 nv50_evo_channel_del(pchan);
109 return ret;
110 }
111
112 ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
113 im_pramin->start, 32768);
114 if (ret) {
115 NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
116 nv50_evo_channel_del(pchan);
117 return ret;
118 }
119
120 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
121 0, &chan->ramht);
122 if (ret) {
123 NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
124 nv50_evo_channel_del(pchan);
125 return ret;
126 }
127
128 if (dev_priv->chipset != 0x50) {
129 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
130 0, 0xffffffff);
131 if (ret) {
132 nv50_evo_channel_del(pchan);
133 return ret;
134 }
135
137 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
138 0, 0xffffffff);
139 if (ret) {
140 nv50_evo_channel_del(pchan);
141 return ret;
142 }
143 }
144
145 ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
146 0, nouveau_mem_fb_amount(dev));
147 if (ret) {
148 nv50_evo_channel_del(pchan);
149 return ret;
150 }
151
152 ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
153 false, true, &chan->pushbuf_bo);
154 if (ret == 0)
155 ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
156 if (ret) {
157 NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
158 nv50_evo_channel_del(pchan);
159 return ret;
160 }
161
162 ret = nouveau_bo_map(chan->pushbuf_bo);
163 if (ret) {
164 NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
165 nv50_evo_channel_del(pchan);
166 return ret;
167 }
168
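/* Map the EVO channel's user control area (PUT/GET registers). */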
169 chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
170 NV50_PDISPLAY_USER(0), PAGE_SIZE);
171 if (!chan->user) {
172 NV_ERROR(dev, "Error mapping EVO control regs.\n");
173 nv50_evo_channel_del(pchan);
174 return -ENOMEM;
175 }
176
177 return 0;
178}
179
180int
181nv50_display_init(struct drm_device *dev)
182{
183 struct drm_nouveau_private *dev_priv = dev->dev_private;
184 struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
185 struct nouveau_channel *evo = dev_priv->evo;
186 struct drm_connector *connector;
187 uint32_t val, ram_amount, hpd_en[2];
188 uint64_t start;
189 int ret, i;
190
191 NV_DEBUG(dev, "\n");
192
193 nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
194 /*
195 * I think the 0x006101XX range is some kind of main control area
196 * that enables things.
197 */
198 /* CRTC? */
199 for (i = 0; i < 2; i++) {
200 val = nv_rd32(dev, 0x00616100 + (i * 0x800));
201 nv_wr32(dev, 0x00610190 + (i * 0x10), val);
202 val = nv_rd32(dev, 0x00616104 + (i * 0x800));
203 nv_wr32(dev, 0x00610194 + (i * 0x10), val);
204 val = nv_rd32(dev, 0x00616108 + (i * 0x800));
205 nv_wr32(dev, 0x00610198 + (i * 0x10), val);
206 val = nv_rd32(dev, 0x0061610c + (i * 0x800));
207 nv_wr32(dev, 0x0061019c + (i * 0x10), val);
208 }
209 /* DAC */
210 for (i = 0; i < 3; i++) {
211 val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
212 nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
213 }
214 /* SOR */
215 for (i = 0; i < 4; i++) {
216 val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
217 nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
218 }
219 /* Something not yet in use; tv-out, maybe. */
220 for (i = 0; i < 3; i++) {
221 val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
222 nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
223 }
224
225 for (i = 0; i < 3; i++) {
226 nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
227 NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
228 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
229 }
230
231 /* This used to be in crtc unblank, but seems out of place there. */
232 nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
233 /* RAM is clamped to 256 MiB. */
234 ram_amount = nouveau_mem_fb_amount(dev);
235 NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
236 if (ram_amount > 256*1024*1024)
237 ram_amount = 256*1024*1024;
238 nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
239 nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
240 nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);
241
242 /* The precise purpose is unknown; I suspect it has something to do
243 * with text mode.
244 */
245 if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
246 nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
247 nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
248 if (!nv_wait(0x006194e8, 2, 0)) {
249 NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
250 NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
251 nv_rd32(dev, 0x6194e8));
252 return -EBUSY;
253 }
254 }
255
256 /* Taken from nv bug #12637; attempts to un-wedge the hw if it's
257 * stuck in some unspecified state.
258 */
259 start = ptimer->read(dev);
260 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
261 while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
262 if ((val & 0x9f0000) == 0x20000)
263 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
264 val | 0x800000);
265
266 if ((val & 0x3f0000) == 0x30000)
267 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
268 val | 0x200000);
269
270 if (ptimer->read(dev) - start > 1000000000ULL) {
271 NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
272 NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
273 return -EBUSY;
274 }
275 }
276
277 nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
278 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
279 if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
280 NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
281 NV_ERROR(dev, "0x610200 = 0x%08x\n",
282 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
283 return -EBUSY;
284 }
285
286 for (i = 0; i < 2; i++) {
287 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
288 if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
289 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
290 NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
291 NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
292 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
293 return -EBUSY;
294 }
295
296 nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
297 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
298 if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
299 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
300 NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
301 NV_ERROR(dev, "timeout: "
302 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
303 NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
304 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
305 return -EBUSY;
306 }
307 }
308
309 nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);
310
311 /* initialise fifo */
312 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
313 ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
314 NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
315 NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
316 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
317 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
318 if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
319 NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
320 NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
321 return -EBUSY;
322 }
323 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
324 (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
325 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
326 nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
327 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
328 NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
329 nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);
330
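/* Software-side ring state: the 4 KiB push buffer holds 1024
 * dwords, minus two reserved at the end.
 */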
331 evo->dma.max = (4096/4) - 2;
332 evo->dma.put = 0;
333 evo->dma.cur = evo->dma.put;
334 evo->dma.free = evo->dma.max - evo->dma.cur;
335
336 ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
337 if (ret)
338 return ret;
339
340 for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
341 OUT_RING(evo, 0);
342
343 ret = RING_SPACE(evo, 11);
344 if (ret)
345 return ret;
346 BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
347 OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
348 OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
349 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
350 OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
351 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
352 OUT_RING(evo, 0);
353 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
354 OUT_RING(evo, 0);
355 BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
356 OUT_RING(evo, 0);
357 FIRE_RING(evo);
358 if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
359 NV_ERROR(dev, "evo pushbuf stalled\n");
360
361 /* enable clock change interrupts. */
362 nv_wr32(dev, 0x610028, 0x00010001);
363 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
364 NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
365 NV50_PDISPLAY_INTR_EN_CLK_UNK40));
366
367 /* enable hotplug interrupts */
368 hpd_en[0] = hpd_en[1] = 0;
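/* Build per-GPIO-line enable masks for hotplug-capable connectors;
 * lines 0-15 land in the first register pair, higher lines in the
 * second.
 */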
369 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
370 struct nouveau_connector *conn = nouveau_connector(connector);
371 struct dcb_gpio_entry *gpio;
372
373 if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
374 connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
375 connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
376 continue;
377
378 gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
379 if (!gpio)
380 continue;
381
382 hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
383 }
384
385 nv_wr32(dev, 0xe054, 0xffffffff);
386 nv_wr32(dev, 0xe050, hpd_en[0]);
387 if (dev_priv->chipset >= 0x90) {
388 nv_wr32(dev, 0xe074, 0xffffffff);
389 nv_wr32(dev, 0xe070, hpd_en[1]);
390 }
391
392 return 0;
393}
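
A note on the hpd_en[] arithmetic above: each of the two hotplug register pairs covers sixteen GPIO lines, with the "plug" enable bit for a line in the low half-word and the "unplug" enable bit in the high half-word, which is why a single 0x00010001 shifted by (line & 0xf) arms both edges at once. A minimal standalone sketch of that computation (the GPIO line numbers here are made up, not from any real DCB):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hpd_en[2] = { 0, 0 };
	int lines[] = { 2, 9, 20 };	/* hypothetical GPIO lines */
	int i;

	for (i = 0; i < 3; i++)
		hpd_en[lines[i] >> 4] |= 0x00010001u << (lines[i] & 0xf);

	/* lines 2 and 9 land in word 0, line 20 in word 1 */
	printf("hpd_en[0] = 0x%08x\n", hpd_en[0]);	/* 0x02040204 */
	printf("hpd_en[1] = 0x%08x\n", hpd_en[1]);	/* 0x00100010 */
	return 0;
}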
394
395static int nv50_display_disable(struct drm_device *dev)
396{
397 struct drm_nouveau_private *dev_priv = dev->dev_private;
398 struct drm_crtc *drm_crtc;
399 int ret, i;
400
401 NV_DEBUG(dev, "\n");
402
403 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
404 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
405
406 nv50_crtc_blank(crtc, true);
407 }
408
409 ret = RING_SPACE(dev_priv->evo, 2);
410 if (ret == 0) {
411 BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
412 OUT_RING(dev_priv->evo, 0);
413 }
414 FIRE_RING(dev_priv->evo);
415
416 /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
417 * cleaning up?
418 */
419 list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
420 struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
421 uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);
422
423 if (!crtc->base.enabled)
424 continue;
425
426 nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
427 if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
428 NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
429 "0x%08x\n", mask, mask);
430 NV_ERROR(dev, "0x610024 = 0x%08x\n",
431 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
432 }
433 }
434
435 nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
436 nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
437 if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
438 NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
439 NV_ERROR(dev, "0x610200 = 0x%08x\n",
440 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
441 }
442
443 for (i = 0; i < 3; i++) {
444 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
445 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
446 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
447 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
448 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
449 }
450 }
451
452 /* disable interrupts. */
453 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);
454
455 /* disable hotplug interrupts */
456 nv_wr32(dev, 0xe054, 0xffffffff);
457 nv_wr32(dev, 0xe050, 0x00000000);
458 if (dev_priv->chipset >= 0x90) {
459 nv_wr32(dev, 0xe074, 0xffffffff);
460 nv_wr32(dev, 0xe070, 0x00000000);
461 }
462 return 0;
463}
464
465int nv50_display_create(struct drm_device *dev)
466{
467 struct drm_nouveau_private *dev_priv = dev->dev_private;
468 struct parsed_dcb *dcb = dev_priv->vbios->dcb;
469 uint32_t connector[16] = {};
470 int ret, i;
471
472 NV_DEBUG(dev, "\n");
473
474 /* init basic kernel modesetting */
475 drm_mode_config_init(dev);
476
477 /* Initialise some optional connector properties. */
478 drm_mode_create_scaling_mode_property(dev);
479 drm_mode_create_dithering_property(dev);
480
481 dev->mode_config.min_width = 0;
482 dev->mode_config.min_height = 0;
483
484 dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
485
486 dev->mode_config.max_width = 8192;
487 dev->mode_config.max_height = 8192;
488
489 dev->mode_config.fb_base = dev_priv->fb_phys;
490
491 /* Create EVO channel */
492 ret = nv50_evo_channel_new(dev, &dev_priv->evo);
493 if (ret) {
494 NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
495 return ret;
496 }
497
498 /* Create CRTC objects */
499 for (i = 0; i < 2; i++)
500 nv50_crtc_create(dev, i);
501
502 /* We setup the encoders from the BIOS table */
503	for (i = 0; i < dcb->entries; i++) {
504 struct dcb_entry *entry = &dcb->entry[i];
505
506 if (entry->location != DCB_LOC_ON_CHIP) {
507 NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
508 entry->type, ffs(entry->or) - 1);
509 continue;
510 }
511
512 switch (entry->type) {
513 case OUTPUT_TMDS:
514 case OUTPUT_LVDS:
515 case OUTPUT_DP:
516 nv50_sor_create(dev, entry);
517 break;
518 case OUTPUT_ANALOG:
519 nv50_dac_create(dev, entry);
520 break;
521 default:
522 NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
523 continue;
524 }
525
526 connector[entry->connector] |= (1 << entry->type);
527 }
528
529	/* It appears that DCB 3.0+ VBIOS has a connector table; however,
530	 * I'm not 100% certain how to decode it correctly yet, so just
531	 * look at what encoders are present on each connector index and
532	 * attempt to derive the connector type from that.
533	 */
534	for (i = 0; i < dcb->entries; i++) {
535 struct dcb_entry *entry = &dcb->entry[i];
536 uint16_t encoders;
537 int type;
538
539 encoders = connector[entry->connector];
540 if (!(encoders & (1 << entry->type)))
541 continue;
542 connector[entry->connector] = 0;
543
544 if (encoders & (1 << OUTPUT_DP)) {
545 type = DRM_MODE_CONNECTOR_DisplayPort;
546 } else if (encoders & (1 << OUTPUT_TMDS)) {
547 if (encoders & (1 << OUTPUT_ANALOG))
548 type = DRM_MODE_CONNECTOR_DVII;
549 else
550 type = DRM_MODE_CONNECTOR_DVID;
551 } else if (encoders & (1 << OUTPUT_ANALOG)) {
552 type = DRM_MODE_CONNECTOR_VGA;
553 } else if (encoders & (1 << OUTPUT_LVDS)) {
554 type = DRM_MODE_CONNECTOR_LVDS;
555 } else {
556 type = DRM_MODE_CONNECTOR_Unknown;
557 }
558
559 if (type == DRM_MODE_CONNECTOR_Unknown)
560 continue;
561
562 nouveau_connector_create(dev, entry->connector, type);
563 }
564
565 ret = nv50_display_init(dev);
566 if (ret)
567 return ret;
568
569 return 0;
570}
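
The encoder-mask to connector-type ladder above can be read as a strict precedence: DisplayPort wins, TMDS plus analog means DVI-I, TMDS alone DVI-D, analog alone VGA, then LVDS. A standalone sketch of just that precedence (the OUTPUT_* bit positions below are assumed to match the driver's DCB output types and are not taken from this patch):

#include <stdio.h>

enum { OUTPUT_ANALOG = 0, OUTPUT_TMDS = 2, OUTPUT_LVDS = 3, OUTPUT_DP = 6 };

static const char *derive_type(unsigned encoders)
{
	if (encoders & (1 << OUTPUT_DP))
		return "DisplayPort";
	if (encoders & (1 << OUTPUT_TMDS))
		return (encoders & (1 << OUTPUT_ANALOG)) ? "DVI-I" : "DVI-D";
	if (encoders & (1 << OUTPUT_ANALOG))
		return "VGA";
	if (encoders & (1 << OUTPUT_LVDS))
		return "LVDS";
	return "Unknown";
}

int main(void)
{
	/* TMDS + analog on one connector index -> DVI-I */
	printf("%s\n", derive_type((1 << OUTPUT_TMDS) | (1 << OUTPUT_ANALOG)));
	return 0;
}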
571
572int nv50_display_destroy(struct drm_device *dev)
573{
574 struct drm_nouveau_private *dev_priv = dev->dev_private;
575
576 NV_DEBUG(dev, "\n");
577
578 drm_mode_config_cleanup(dev);
579
580 nv50_display_disable(dev);
581 nv50_evo_channel_del(&dev_priv->evo);
582
583 return 0;
584}
585
586static inline uint32_t
587nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
588{
589 struct drm_nouveau_private *dev_priv = dev->dev_private;
590 uint32_t mc;
591
592 if (sor) {
593 if (dev_priv->chipset < 0x90 ||
594 dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
595 mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
596 else
597 mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
598 } else {
599 mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
600 }
601
602 return mc;
603}
604
605static int
606nv50_display_irq_head(struct drm_device *dev, int *phead,
607 struct dcb_entry **pdcbent)
608{
609 struct drm_nouveau_private *dev_priv = dev->dev_private;
610 uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
611 uint32_t dac = 0, sor = 0;
612 int head, i, or = 0, type = OUTPUT_ANY;
613
614 /* We're assuming that head 0 *or* head 1 will be active here,
615 * and not both. I'm not sure if the hw will even signal both
616 * ever, but it definitely shouldn't for us as we commit each
617 * CRTC separately, and submission will be blocked by the GPU
618 * until we handle each in turn.
619 */
620 NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
621 head = ffs((unk30 >> 9) & 3) - 1;
622 if (head < 0)
623 return -EINVAL;
624
625 /* This assumes CRTCs are never bound to multiple encoders, which
626 * should be the case.
627 */
628 for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
629 uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
630 if (!(mc & (1 << head)))
631 continue;
632
633 switch ((mc >> 8) & 0xf) {
634 case 0: type = OUTPUT_ANALOG; break;
635 case 1: type = OUTPUT_TV; break;
636 default:
637			NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", mc);
638 return -1;
639 }
640
641 or = i;
642 }
643
644 for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
645 uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
646 if (!(mc & (1 << head)))
647 continue;
648
649 switch ((mc >> 8) & 0xf) {
650 case 0: type = OUTPUT_LVDS; break;
651 case 1: type = OUTPUT_TMDS; break;
652 case 2: type = OUTPUT_TMDS; break;
653 case 5: type = OUTPUT_TMDS; break;
654 case 8: type = OUTPUT_DP; break;
655 case 9: type = OUTPUT_DP; break;
656 default:
657			NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", mc);
658 return -1;
659 }
660
661 or = i;
662 }
663
664 NV_DEBUG(dev, "type %d, or %d\n", type, or);
665 if (type == OUTPUT_ANY) {
666 NV_ERROR(dev, "unknown encoder!!\n");
667 return -1;
668 }
669
670 for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
671 struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
672
673 if (dcbent->type != type)
674 continue;
675
676 if (!(dcbent->or & (1 << or)))
677 continue;
678
679 *phead = head;
680 *pdcbent = dcbent;
681 return 0;
682 }
683
684	NV_ERROR(dev, "no DCB entry for type %d, or %d\n", type, or);
685	return -EINVAL;
686}
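
The head decode at the top of this function relies on bits 9 and 10 of the 0x610030 register flagging heads 0 and 1; ffs() then yields -1 (after the subtraction) when neither is set. A compilable sketch of that decode, using the GCC builtin in place of the kernel's ffs():

#include <stdio.h>

int main(void)
{
	unsigned unk30 = 1u << 10;	/* hypothetical: head 1 flagged */
	int head = __builtin_ffs((unk30 >> 9) & 3) - 1;

	printf("head = %d\n", head);	/* 1; -1 if neither bit set */
	return 0;
}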
687
688static uint32_t
689nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
690 int pxclk)
691{
692 struct drm_nouveau_private *dev_priv = dev->dev_private;
693 struct nvbios *bios = &dev_priv->VBIOS;
694 uint32_t mc, script = 0, or;
695
696 or = ffs(dcbent->or) - 1;
697 mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
698 switch (dcbent->type) {
699 case OUTPUT_LVDS:
700 script = (mc >> 8) & 0xf;
701 if (bios->pub.fp_no_ddc) {
702 if (bios->fp.dual_link)
703 script |= 0x0100;
704 if (bios->fp.if_is_24bit)
705 script |= 0x0200;
706 } else {
707 if (pxclk >= bios->fp.duallink_transition_clk) {
708 script |= 0x0100;
709 if (bios->fp.strapless_is_24bit & 2)
710 script |= 0x0200;
711 } else
712 if (bios->fp.strapless_is_24bit & 1)
713 script |= 0x0200;
714 }
715
716 if (nouveau_uscript_lvds >= 0) {
717 NV_INFO(dev, "override script 0x%04x with 0x%04x "
718 "for output LVDS-%d\n", script,
719 nouveau_uscript_lvds, or);
720 script = nouveau_uscript_lvds;
721 }
722 break;
723 case OUTPUT_TMDS:
724 script = (mc >> 8) & 0xf;
725 if (pxclk >= 165000)
726 script |= 0x0100;
727
728 if (nouveau_uscript_tmds >= 0) {
729 NV_INFO(dev, "override script 0x%04x with 0x%04x "
730 "for output TMDS-%d\n", script,
731 nouveau_uscript_tmds, or);
732 script = nouveau_uscript_tmds;
733 }
734 break;
735 case OUTPUT_DP:
736 script = (mc >> 8) & 0xf;
737 break;
738 case OUTPUT_ANALOG:
739 script = 0xff;
740 break;
741 default:
742 NV_ERROR(dev, "modeset on unsupported output type!\n");
743 break;
744 }
745
746 return script;
747}
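
For LVDS the script word above is composed from three pieces: the low nibble of the SOR mode-ctrl method, 0x0100 for dual-link (chosen by comparing the pixel clock against the dual-link transition clock, unless the no-DDC strap forces it), and 0x0200 for a 24-bit interface. A standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mc = 0x00000200;	/* hypothetical mode-ctrl value */
	int pxclk = 120000;		/* hypothetical pixel clock, kHz */
	int transition_clk = 112000;
	int is_24bit = 1;

	uint32_t script = (mc >> 8) & 0xf;	/* -> 0x2 */
	if (pxclk >= transition_clk)
		script |= 0x0100;		/* dual link */
	if (is_24bit)
		script |= 0x0200;		/* 24-bit panel */

	printf("script = 0x%04x\n", script);	/* 0x0302 */
	return 0;
}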
748
749static void
750nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
751{
752 struct drm_nouveau_private *dev_priv = dev->dev_private;
753 struct nouveau_channel *chan;
754 struct list_head *entry, *tmp;
755
756 list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
757 chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);
758
759 nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
760 chan->nvsw.vblsem_rval);
761 list_del(&chan->nvsw.vbl_wait);
762 }
763}
764
765static void
766nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
767{
768 intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
769
770 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
771 nv50_display_vblank_crtc_handler(dev, 0);
772
773 if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
774 nv50_display_vblank_crtc_handler(dev, 1);
775
776 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
777 NV50_PDISPLAY_INTR_EN) & ~intr);
778 nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
779}
780
781static void
782nv50_display_unk10_handler(struct drm_device *dev)
783{
784 struct dcb_entry *dcbent;
785 int head, ret;
786
787 ret = nv50_display_irq_head(dev, &head, &dcbent);
788 if (ret)
789 goto ack;
790
791 nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
792
793 nouveau_bios_run_display_table(dev, dcbent, 0, -1);
794
795ack:
796 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
797 nv_wr32(dev, 0x610030, 0x80000000);
798}
799
800static void
801nv50_display_unk20_handler(struct drm_device *dev)
802{
803 struct dcb_entry *dcbent;
804 uint32_t tmp, pclk, script;
805 int head, or, ret;
806
807 ret = nv50_display_irq_head(dev, &head, &dcbent);
808 if (ret)
809 goto ack;
810 or = ffs(dcbent->or) - 1;
811 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
812 script = nv50_display_script_select(dev, dcbent, pclk);
813
814 NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);
815
816 if (dcbent->type != OUTPUT_DP)
817 nouveau_bios_run_display_table(dev, dcbent, 0, -2);
818
819 nv50_crtc_set_clock(dev, head, pclk);
820
821 nouveau_bios_run_display_table(dev, dcbent, script, pclk);
822
823 tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
824	tmp &= ~0x0000000f;
825 nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
826
827 if (dcbent->type != OUTPUT_ANALOG) {
828 tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
829 tmp &= ~0x00000f0f;
830 if (script & 0x0100)
831 tmp |= 0x00000101;
832 nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
833 } else {
834 nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
835 }
836
837ack:
838 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
839 nv_wr32(dev, 0x610030, 0x80000000);
840}
841
842static void
843nv50_display_unk40_handler(struct drm_device *dev)
844{
845 struct dcb_entry *dcbent;
846 int head, pclk, script, ret;
847
848 ret = nv50_display_irq_head(dev, &head, &dcbent);
849 if (ret)
850 goto ack;
851 pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
852 script = nv50_display_script_select(dev, dcbent, pclk);
853
854 nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
855
856ack:
857 nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
858 nv_wr32(dev, 0x610030, 0x80000000);
859 nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
860}
861
862void
863nv50_display_irq_handler_bh(struct work_struct *work)
864{
865 struct drm_nouveau_private *dev_priv =
866 container_of(work, struct drm_nouveau_private, irq_work);
867 struct drm_device *dev = dev_priv->dev;
868
869 for (;;) {
870 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
871 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
872
873 NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
874
875 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
876 nv50_display_unk10_handler(dev);
877 else
878 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
879 nv50_display_unk20_handler(dev);
880 else
881 if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
882 nv50_display_unk40_handler(dev);
883 else
884 break;
885 }
886
887 nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
888}
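
The else-if chain above services the three clock phases strictly in 0x10 -> 0x20 -> 0x40 order, one per loop pass, before re-enabling PMC interrupts. A sketch of just that dispatch shape (bit values inferred from the NV50_PDISPLAY_INTR_1_CLK_UNK* names; the pending word is made up):

#include <stdio.h>

int main(void)
{
	unsigned pending = 0x40 | 0x10;	/* hypothetical */

	while (pending) {
		if (pending & 0x10) {
			puts("unk10"); pending &= ~0x10u;
		} else if (pending & 0x20) {
			puts("unk20"); pending &= ~0x20u;
		} else if (pending & 0x40) {
			puts("unk40"); pending &= ~0x40u;
		}
	}
	return 0;
}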
889
890static void
891nv50_display_error_handler(struct drm_device *dev)
892{
893 uint32_t addr, data;
894
895 nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
896 addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
897 data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);
898
899 NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
900 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);
901
902 nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
903}
904
905static void
906nv50_display_irq_hotplug(struct drm_device *dev)
907{
908 struct drm_nouveau_private *dev_priv = dev->dev_private;
909 struct drm_connector *connector;
910 const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
911 uint32_t unplug_mask, plug_mask, change_mask;
912 uint32_t hpd0, hpd1 = 0;
913
914 hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
915 if (dev_priv->chipset >= 0x90)
916 hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
917
918 plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
919 unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
920 change_mask = plug_mask | unplug_mask;
921
922 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
923 struct drm_encoder_helper_funcs *helper;
924 struct nouveau_connector *nv_connector =
925 nouveau_connector(connector);
926 struct nouveau_encoder *nv_encoder;
927 struct dcb_gpio_entry *gpio;
928 uint32_t reg;
929 bool plugged;
930
931 if (!nv_connector->dcb)
932 continue;
933
934 gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
935 if (!gpio || !(change_mask & (1 << gpio->line)))
936 continue;
937
938 reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
939 plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
940		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
941			drm_get_connector_name(connector));
942
943 if (!connector->encoder || !connector->encoder->crtc ||
944 !connector->encoder->crtc->enabled)
945 continue;
946 nv_encoder = nouveau_encoder(connector->encoder);
947 helper = connector->encoder->helper_private;
948
949 if (nv_encoder->dcb->type != OUTPUT_DP)
950 continue;
951
952 if (plugged)
953 helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
954 else
955 helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
956 }
957
958 nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
959 if (dev_priv->chipset >= 0x90)
960 nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));
961}
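
The mask folding at the top of this function assumes 0xe054 reports lines 0-15 (plug events in the low half-word, unplug events in the high) and 0xe074 the same for lines 16-31, yielding one 32-bit line mask per event kind. A standalone sketch with invented register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hpd0 = 0x00040002;	/* line 1 plug, line 2 unplug */
	uint32_t hpd1 = 0x00000010;	/* line 20 plug */

	uint32_t plug   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
	uint32_t unplug = (hpd0 >> 16) | (hpd1 & 0xffff0000);

	printf("plug   = 0x%08x\n", plug);	/* 0x00100002 */
	printf("unplug = 0x%08x\n", unplug);	/* 0x00000004 */
	return 0;
}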
962
963void
964nv50_display_irq_handler(struct drm_device *dev)
965{
966 struct drm_nouveau_private *dev_priv = dev->dev_private;
967 uint32_t delayed = 0;
968
969 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG)
970 nv50_display_irq_hotplug(dev);
971
972 while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
973 uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
974 uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
975 uint32_t clock;
976
977 NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
978
979 if (!intr0 && !(intr1 & ~delayed))
980 break;
981
982 if (intr0 & 0x00010000) {
983 nv50_display_error_handler(dev);
984 intr0 &= ~0x00010000;
985 }
986
987 if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
988 nv50_display_vblank_handler(dev, intr1);
989 intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
990 }
991
992 clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
993 NV50_PDISPLAY_INTR_1_CLK_UNK20 |
994 NV50_PDISPLAY_INTR_1_CLK_UNK40));
995 if (clock) {
996 nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
997 if (!work_pending(&dev_priv->irq_work))
998 queue_work(dev_priv->wq, &dev_priv->irq_work);
999 delayed |= clock;
1000 intr1 &= ~clock;
1001 }
1002
1003 if (intr0) {
1004 NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
1005 nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
1006 }
1007
1008 if (intr1) {
1009 NV_ERROR(dev,
1010 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
1011 nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
1012 }
1013 }
1014}
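
The "delayed" word above exists because the clock-phase bits stay asserted until the bottom half handles them: they are recorded once, masked out of the loop's exit test, and the top half therefore terminates instead of spinning on bits it has already deferred. A toy sketch of that accounting (values invented):

#include <stdio.h>

int main(void)
{
	unsigned delayed = 0;
	unsigned intr1 = 0x20;	/* hypothetical pending clock bit */
	int pass;

	for (pass = 0; pass < 5; pass++) {
		if (!(intr1 & ~delayed)) {
			printf("loop exits on pass %d\n", pass);
			break;
		}
		delayed |= intr1 & 0x70;	/* defer clock bits to BH */
	}
	return 0;
}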
1015
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
new file mode 100644
index 000000000000..3ae8d0725f63
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __NV50_DISPLAY_H__
28#define __NV50_DISPLAY_H__
29
30#include "drmP.h"
31#include "drm.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_reg.h"
35#include "nouveau_crtc.h"
36#include "nv50_evo.h"
37
38void nv50_display_irq_handler(struct drm_device *dev);
39void nv50_display_irq_handler_bh(struct work_struct *work);
40int nv50_display_init(struct drm_device *dev);
41int nv50_display_create(struct drm_device *dev);
42int nv50_display_destroy(struct drm_device *dev);
43int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
44int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
45
46#endif /* __NV50_DISPLAY_H__ */
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.h b/drivers/gpu/drm/nouveau/nv50_evo.h
new file mode 100644
index 000000000000..aae13343bcec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_evo.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#define NV50_EVO_UPDATE 0x00000080
28#define NV50_EVO_UNK84 0x00000084
29#define NV50_EVO_UNK84_NOTIFY 0x40000000
30#define NV50_EVO_UNK84_NOTIFY_DISABLED 0x00000000
31#define NV50_EVO_UNK84_NOTIFY_ENABLED 0x40000000
32#define NV50_EVO_DMA_NOTIFY 0x00000088
33#define NV50_EVO_DMA_NOTIFY_HANDLE 0xffffffff
34#define NV50_EVO_DMA_NOTIFY_HANDLE_NONE 0x00000000
35#define NV50_EVO_UNK8C 0x0000008C
36
37#define NV50_EVO_DAC(n, r) ((n) * 0x80 + NV50_EVO_DAC_##r)
38#define NV50_EVO_DAC_MODE_CTRL 0x00000400
39#define NV50_EVO_DAC_MODE_CTRL_CRTC0 0x00000001
40#define NV50_EVO_DAC_MODE_CTRL_CRTC1 0x00000002
41#define NV50_EVO_DAC_MODE_CTRL2 0x00000404
42#define NV50_EVO_DAC_MODE_CTRL2_NHSYNC 0x00000001
43#define NV50_EVO_DAC_MODE_CTRL2_NVSYNC 0x00000002
44
45#define NV50_EVO_SOR(n, r) ((n) * 0x40 + NV50_EVO_SOR_##r)
46#define NV50_EVO_SOR_MODE_CTRL 0x00000600
47#define NV50_EVO_SOR_MODE_CTRL_CRTC0 0x00000001
48#define NV50_EVO_SOR_MODE_CTRL_CRTC1 0x00000002
49#define NV50_EVO_SOR_MODE_CTRL_TMDS 0x00000100
50#define NV50_EVO_SOR_MODE_CTRL_TMDS_DUAL_LINK 0x00000400
51#define NV50_EVO_SOR_MODE_CTRL_NHSYNC 0x00001000
52#define NV50_EVO_SOR_MODE_CTRL_NVSYNC 0x00002000
53
54#define NV50_EVO_CRTC(n, r) ((n) * 0x400 + NV50_EVO_CRTC_##r)
55#define NV84_EVO_CRTC(n, r) ((n) * 0x400 + NV84_EVO_CRTC_##r)
56#define NV50_EVO_CRTC_UNK0800 0x00000800
57#define NV50_EVO_CRTC_CLOCK 0x00000804
58#define NV50_EVO_CRTC_INTERLACE 0x00000808
59#define NV50_EVO_CRTC_DISPLAY_START 0x00000810
60#define NV50_EVO_CRTC_DISPLAY_TOTAL 0x00000814
61#define NV50_EVO_CRTC_SYNC_DURATION 0x00000818
62#define NV50_EVO_CRTC_SYNC_START_TO_BLANK_END 0x0000081c
63#define NV50_EVO_CRTC_UNK0820 0x00000820
64#define NV50_EVO_CRTC_UNK0824 0x00000824
65#define NV50_EVO_CRTC_UNK082C 0x0000082c
66#define NV50_EVO_CRTC_CLUT_MODE 0x00000840
67/* You can't have a palette in 8 bit mode (=OFF) */
68#define NV50_EVO_CRTC_CLUT_MODE_BLANK 0x00000000
69#define NV50_EVO_CRTC_CLUT_MODE_OFF 0x80000000
70#define NV50_EVO_CRTC_CLUT_MODE_ON 0xC0000000
71#define NV50_EVO_CRTC_CLUT_OFFSET 0x00000844
72#define NV84_EVO_CRTC_CLUT_DMA 0x0000085C
73#define NV84_EVO_CRTC_CLUT_DMA_HANDLE 0xffffffff
74#define NV84_EVO_CRTC_CLUT_DMA_HANDLE_NONE 0x00000000
75#define NV50_EVO_CRTC_FB_OFFSET 0x00000860
76#define NV50_EVO_CRTC_FB_SIZE 0x00000868
77#define NV50_EVO_CRTC_FB_CONFIG 0x0000086c
78#define NV50_EVO_CRTC_FB_CONFIG_MODE 0x00100000
79#define NV50_EVO_CRTC_FB_CONFIG_MODE_TILE 0x00000000
80#define NV50_EVO_CRTC_FB_CONFIG_MODE_PITCH 0x00100000
81#define NV50_EVO_CRTC_FB_DEPTH 0x00000870
82#define NV50_EVO_CRTC_FB_DEPTH_8 0x00001e00
83#define NV50_EVO_CRTC_FB_DEPTH_15 0x0000e900
84#define NV50_EVO_CRTC_FB_DEPTH_16 0x0000e800
85#define NV50_EVO_CRTC_FB_DEPTH_24 0x0000cf00
86#define NV50_EVO_CRTC_FB_DEPTH_30 0x0000d100
87#define NV50_EVO_CRTC_FB_DMA 0x00000874
88#define NV50_EVO_CRTC_FB_DMA_HANDLE 0xffffffff
89#define NV50_EVO_CRTC_FB_DMA_HANDLE_NONE 0x00000000
90#define NV50_EVO_CRTC_CURSOR_CTRL 0x00000880
91#define NV50_EVO_CRTC_CURSOR_CTRL_HIDE 0x05000000
92#define NV50_EVO_CRTC_CURSOR_CTRL_SHOW 0x85000000
93#define NV50_EVO_CRTC_CURSOR_OFFSET 0x00000884
94#define NV84_EVO_CRTC_CURSOR_DMA 0x0000089c
95#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE 0xffffffff
96#define NV84_EVO_CRTC_CURSOR_DMA_HANDLE_NONE 0x00000000
97#define NV50_EVO_CRTC_DITHER_CTRL 0x000008a0
98#define NV50_EVO_CRTC_DITHER_CTRL_OFF 0x00000000
99#define NV50_EVO_CRTC_DITHER_CTRL_ON 0x00000011
100#define NV50_EVO_CRTC_SCALE_CTRL 0x000008a4
101#define NV50_EVO_CRTC_SCALE_CTRL_INACTIVE 0x00000000
102#define NV50_EVO_CRTC_SCALE_CTRL_ACTIVE 0x00000009
103#define NV50_EVO_CRTC_COLOR_CTRL 0x000008a8
104#define NV50_EVO_CRTC_COLOR_CTRL_COLOR 0x00040000
105#define NV50_EVO_CRTC_FB_POS 0x000008c0
106#define NV50_EVO_CRTC_REAL_RES 0x000008c8
107#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET 0x000008d4
108#define NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(x, y) \
109 ((((unsigned)y << 16) & 0xFFFF0000) | (((unsigned)x) & 0x0000FFFF))
110/* Both of these are needed; otherwise nothing happens. */
111#define NV50_EVO_CRTC_SCALE_RES1 0x000008d8
112#define NV50_EVO_CRTC_SCALE_RES2 0x000008dc
113
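The method-address macros above encode fixed per-unit strides: DACs are 0x80 apart, SORs 0x40, CRTCs 0x400. For example, with the definitions in this header, NV50_EVO_CRTC(1, CLOCK) expands to 1 * 0x400 + 0x804 = 0xc04. A trivial compilable check of that arithmetic:

#include <stdio.h>

#define EVO_CRTC_CLOCK	0x00000804
#define EVO_CRTC(n)	((n) * 0x400 + EVO_CRTC_CLOCK)

int main(void)
{
	printf("0x%04x\n", EVO_CRTC(1));	/* 0x0c04 */
	return 0;
}
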
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
new file mode 100644
index 000000000000..6bcc6d39e9b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -0,0 +1,273 @@
1#include "drmP.h"
2#include "nouveau_drv.h"
3#include "nouveau_dma.h"
4#include "nouveau_fbcon.h"
5
6static void
7nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
8{
9 struct nouveau_fbcon_par *par = info->par;
10 struct drm_device *dev = par->dev;
11 struct drm_nouveau_private *dev_priv = dev->dev_private;
12 struct nouveau_channel *chan = dev_priv->channel;
13
14 if (info->state != FBINFO_STATE_RUNNING)
15 return;
16
17 if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
18 RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
19 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
20
21 info->flags |= FBINFO_HWACCEL_DISABLED;
22 }
23
24 if (info->flags & FBINFO_HWACCEL_DISABLED) {
25 cfb_fillrect(info, rect);
26 return;
27 }
28
29 if (rect->rop != ROP_COPY) {
30 BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
31 OUT_RING(chan, 1);
32 }
33 BEGIN_RING(chan, NvSub2D, 0x0588, 1);
34 OUT_RING(chan, rect->color);
35 BEGIN_RING(chan, NvSub2D, 0x0600, 4);
36 OUT_RING(chan, rect->dx);
37 OUT_RING(chan, rect->dy);
38 OUT_RING(chan, rect->dx + rect->width);
39 OUT_RING(chan, rect->dy + rect->height);
40 if (rect->rop != ROP_COPY) {
41 BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
42 OUT_RING(chan, 3);
43 }
44 FIRE_RING(chan);
45}
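
All three acceleration hooks in this file share the fallback shape seen above: a failed ring-space reservation is treated as a GPU lockup, acceleration is flagged off permanently, and the cfb_* software path takes over from then on. A standalone sketch of that shape (the stubbed ring_space() below simply pretends to fail):

#include <stdbool.h>
#include <stdio.h>

static bool hw_disabled;

static int ring_space(int dwords)
{
	return -16;	/* pretend the reservation timed out (-EBUSY) */
}

static void fillrect(void)
{
	if (!hw_disabled && ring_space(7)) {
		puts("GPU lockup - switching to software fbcon");
		hw_disabled = true;
	}
	if (hw_disabled) {
		puts("cfb_fillrect()");	/* software fallback */
		return;
	}
	puts("hardware fillrect");
}

int main(void)
{
	fillrect();
	return 0;
}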
46
47static void
48nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
49{
50 struct nouveau_fbcon_par *par = info->par;
51 struct drm_device *dev = par->dev;
52 struct drm_nouveau_private *dev_priv = dev->dev_private;
53 struct nouveau_channel *chan = dev_priv->channel;
54
55 if (info->state != FBINFO_STATE_RUNNING)
56 return;
57
58 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
59 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
60
61 info->flags |= FBINFO_HWACCEL_DISABLED;
62 }
63
64 if (info->flags & FBINFO_HWACCEL_DISABLED) {
65 cfb_copyarea(info, region);
66 return;
67 }
68
69 BEGIN_RING(chan, NvSub2D, 0x0110, 1);
70 OUT_RING(chan, 0);
71 BEGIN_RING(chan, NvSub2D, 0x08b0, 4);
72 OUT_RING(chan, region->dx);
73 OUT_RING(chan, region->dy);
74 OUT_RING(chan, region->width);
75 OUT_RING(chan, region->height);
76 BEGIN_RING(chan, NvSub2D, 0x08d0, 4);
77 OUT_RING(chan, 0);
78 OUT_RING(chan, region->sx);
79 OUT_RING(chan, 0);
80 OUT_RING(chan, region->sy);
81 FIRE_RING(chan);
82}
83
84static void
85nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
86{
87 struct nouveau_fbcon_par *par = info->par;
88 struct drm_device *dev = par->dev;
89 struct drm_nouveau_private *dev_priv = dev->dev_private;
90 struct nouveau_channel *chan = dev_priv->channel;
91 uint32_t width, dwords, *data = (uint32_t *)image->data;
92 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
93 uint32_t *palette = info->pseudo_palette;
94
95 if (info->state != FBINFO_STATE_RUNNING)
96 return;
97
98 if (image->depth != 1) {
99 cfb_imageblit(info, image);
100 return;
101 }
102
103 if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
104 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
105 info->flags |= FBINFO_HWACCEL_DISABLED;
106 }
107
108 if (info->flags & FBINFO_HWACCEL_DISABLED) {
109 cfb_imageblit(info, image);
110 return;
111 }
112
113 width = (image->width + 31) & ~31;
114 dwords = (width * image->height) >> 5;
115
116 BEGIN_RING(chan, NvSub2D, 0x0814, 2);
117 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
118 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
119 OUT_RING(chan, palette[image->bg_color] | mask);
120 OUT_RING(chan, palette[image->fg_color] | mask);
121 } else {
122 OUT_RING(chan, image->bg_color);
123 OUT_RING(chan, image->fg_color);
124 }
125 BEGIN_RING(chan, NvSub2D, 0x0838, 2);
126 OUT_RING(chan, image->width);
127 OUT_RING(chan, image->height);
128 BEGIN_RING(chan, NvSub2D, 0x0850, 4);
129 OUT_RING(chan, 0);
130 OUT_RING(chan, image->dx);
131 OUT_RING(chan, 0);
132 OUT_RING(chan, image->dy);
133
134 while (dwords) {
135 int push = dwords > 2047 ? 2047 : dwords;
136
137 if (RING_SPACE(chan, push + 1)) {
138 NV_ERROR(dev,
139 "GPU lockup - switching to software fbcon\n");
140 info->flags |= FBINFO_HWACCEL_DISABLED;
141 cfb_imageblit(info, image);
142 return;
143 }
144
145 dwords -= push;
146
147 BEGIN_RING(chan, NvSub2D, 0x40000860, push);
148 OUT_RINGp(chan, data, push);
149 data += push;
150 }
151
152 FIRE_RING(chan);
153}
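
The sizing math above pads each 1bpp glyph row to a 32-pixel multiple, giving one bit per pixel and hence (width * height) / 32 dwords of expansion data, which are then streamed in chunks of at most 2047 dwords per method call. A quick compilable check:

#include <stdio.h>

int main(void)
{
	unsigned w = 12, h = 22;		/* e.g. a 12x22 console glyph */
	unsigned width = (w + 31) & ~31u;	/* padded -> 32 */
	unsigned dwords = (width * h) >> 5;	/* -> 22 */

	printf("width=%u dwords=%u\n", width, dwords);
	return 0;
}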
154
155int
156nv50_fbcon_accel_init(struct fb_info *info)
157{
158 struct nouveau_fbcon_par *par = info->par;
159 struct drm_device *dev = par->dev;
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 struct nouveau_channel *chan = dev_priv->channel;
162 struct nouveau_gpuobj *eng2d = NULL;
163 int ret, format;
164
165 switch (info->var.bits_per_pixel) {
166 case 8:
167 format = 0xf3;
168 break;
169 case 15:
170 format = 0xf8;
171 break;
172 case 16:
173 format = 0xe8;
174 break;
175 case 32:
176 switch (info->var.transp.length) {
177 case 0: /* depth 24 */
178 case 8: /* depth 32, just use 24.. */
179 format = 0xe6;
180 break;
181 case 2: /* depth 30 */
182 format = 0xd1;
183 break;
184 default:
185 return -EINVAL;
186 }
187 break;
188 default:
189 return -EINVAL;
190 }
191
192 ret = nouveau_gpuobj_gr_new(dev_priv->channel, 0x502d, &eng2d);
193 if (ret)
194 return ret;
195
196 ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, Nv2D, eng2d, NULL);
197 if (ret)
198 return ret;
199
200 ret = RING_SPACE(chan, 59);
201 if (ret) {
202 NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
203 return ret;
204 }
205
206 BEGIN_RING(chan, NvSub2D, 0x0000, 1);
207 OUT_RING(chan, Nv2D);
208 BEGIN_RING(chan, NvSub2D, 0x0180, 4);
209 OUT_RING(chan, NvNotify0);
210 OUT_RING(chan, chan->vram_handle);
211 OUT_RING(chan, chan->vram_handle);
212 OUT_RING(chan, chan->vram_handle);
213 BEGIN_RING(chan, NvSub2D, 0x0290, 1);
214 OUT_RING(chan, 0);
215 BEGIN_RING(chan, NvSub2D, 0x0888, 1);
216 OUT_RING(chan, 1);
217 BEGIN_RING(chan, NvSub2D, 0x02ac, 1);
218 OUT_RING(chan, 3);
219 BEGIN_RING(chan, NvSub2D, 0x02a0, 1);
220 OUT_RING(chan, 0x55);
221 BEGIN_RING(chan, NvSub2D, 0x08c0, 4);
222 OUT_RING(chan, 0);
223 OUT_RING(chan, 1);
224 OUT_RING(chan, 0);
225 OUT_RING(chan, 1);
226 BEGIN_RING(chan, NvSub2D, 0x0580, 2);
227 OUT_RING(chan, 4);
228 OUT_RING(chan, format);
229 BEGIN_RING(chan, NvSub2D, 0x02e8, 2);
230 OUT_RING(chan, 2);
231 OUT_RING(chan, 1);
232 BEGIN_RING(chan, NvSub2D, 0x0804, 1);
233 OUT_RING(chan, format);
234 BEGIN_RING(chan, NvSub2D, 0x0800, 1);
235 OUT_RING(chan, 1);
236 BEGIN_RING(chan, NvSub2D, 0x0808, 3);
237 OUT_RING(chan, 0);
238 OUT_RING(chan, 0);
239 OUT_RING(chan, 0);
240 BEGIN_RING(chan, NvSub2D, 0x081c, 1);
241 OUT_RING(chan, 1);
242 BEGIN_RING(chan, NvSub2D, 0x0840, 4);
243 OUT_RING(chan, 0);
244 OUT_RING(chan, 1);
245 OUT_RING(chan, 0);
246 OUT_RING(chan, 1);
247 BEGIN_RING(chan, NvSub2D, 0x0200, 2);
248 OUT_RING(chan, format);
249 OUT_RING(chan, 1);
250 BEGIN_RING(chan, NvSub2D, 0x0214, 5);
251 OUT_RING(chan, info->fix.line_length);
252 OUT_RING(chan, info->var.xres_virtual);
253 OUT_RING(chan, info->var.yres_virtual);
254 OUT_RING(chan, 0);
255 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
256 dev_priv->vm_vram_base);
257 BEGIN_RING(chan, NvSub2D, 0x0230, 2);
258 OUT_RING(chan, format);
259 OUT_RING(chan, 1);
260 BEGIN_RING(chan, NvSub2D, 0x0244, 5);
261 OUT_RING(chan, info->fix.line_length);
262 OUT_RING(chan, info->var.xres_virtual);
263 OUT_RING(chan, info->var.yres_virtual);
264 OUT_RING(chan, 0);
265 OUT_RING(chan, info->fix.smem_start - dev_priv->fb_phys +
266 dev_priv->vm_vram_base);
267
268 info->fbops->fb_fillrect = nv50_fbcon_fillrect;
269 info->fbops->fb_copyarea = nv50_fbcon_copyarea;
270 info->fbops->fb_imageblit = nv50_fbcon_imageblit;
271 return 0;
272}
273
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 000000000000..77ae1aaa0bce
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,494 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31struct nv50_fifo_priv {
32 struct nouveau_gpuobj_ref *thingo[2];
33 int cur_thingo;
34};
35
36#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
37
38static void
39nv50_fifo_init_thingo(struct drm_device *dev)
40{
41 struct drm_nouveau_private *dev_priv = dev->dev_private;
42 struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
43 struct nouveau_gpuobj_ref *cur;
44 int i, nr;
45
46 NV_DEBUG(dev, "\n");
47
48 cur = priv->thingo[priv->cur_thingo];
49 priv->cur_thingo = !priv->cur_thingo;
50
51 /* We never schedule channel 0 or 127 */
52 dev_priv->engine.instmem.prepare_access(dev, true);
53 for (i = 1, nr = 0; i < 127; i++) {
54 if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
55 nv_wo32(dev, cur->gpuobj, nr++, i);
56 }
57 dev_priv->engine.instmem.finish_access(dev);
58
59 nv_wr32(dev, 0x32f4, cur->instance >> 12);
60 nv_wr32(dev, 0x32ec, nr);
61 nv_wr32(dev, 0x2500, 0x101);
62}
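
The function above maintains two runlist buffers and alternates between them on every rebuild: channels 1..126 that own a RAMFC are written out as IDs, then the buffer base (shifted down by 12) and the entry count are handed to the scheduler. A standalone sketch of the list build, with channel presence faked by a bitmap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t runlist[128];
	unsigned char has_ramfc[128] = { 0 };
	int i, nr = 0;

	has_ramfc[3] = has_ramfc[7] = 1;	/* hypothetical channels */

	for (i = 1; i < 127; i++)
		if (has_ramfc[i])
			runlist[nr++] = i;

	printf("nr=%d first=%u\n", nr, runlist[0]);	/* nr=2 first=3 */
	return 0;
}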
63
64static int
65nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
66{
67 struct drm_nouveau_private *dev_priv = dev->dev_private;
68 struct nouveau_channel *chan = dev_priv->fifos[channel];
69 uint32_t inst;
70
71 NV_DEBUG(dev, "ch%d\n", channel);
72
73 if (!chan->ramfc)
74 return -EINVAL;
75
76 if (IS_G80)
77 inst = chan->ramfc->instance >> 12;
78 else
79 inst = chan->ramfc->instance >> 8;
80 nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
81 inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
82
83 if (!nt)
84 nv50_fifo_init_thingo(dev);
85 return 0;
86}
87
88static void
89nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
90{
91 struct drm_nouveau_private *dev_priv = dev->dev_private;
92 uint32_t inst;
93
94 NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
95
96 if (IS_G80)
97 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
98 else
99 inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
100 nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
101
102 if (!nt)
103 nv50_fifo_init_thingo(dev);
104}
105
106static void
107nv50_fifo_init_reset(struct drm_device *dev)
108{
109 uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
110
111 NV_DEBUG(dev, "\n");
112
113 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
114 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
115}
116
117static void
118nv50_fifo_init_intr(struct drm_device *dev)
119{
120 NV_DEBUG(dev, "\n");
121
122 nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
123 nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
124}
125
126static void
127nv50_fifo_init_context_table(struct drm_device *dev)
128{
129 struct drm_nouveau_private *dev_priv = dev->dev_private;
130 int i;
131
132 NV_DEBUG(dev, "\n");
133
134 for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
135 if (dev_priv->fifos[i])
136 nv50_fifo_channel_enable(dev, i, true);
137 else
138 nv50_fifo_channel_disable(dev, i, true);
139 }
140
141 nv50_fifo_init_thingo(dev);
142}
143
144static void
145nv50_fifo_init_regs__nv(struct drm_device *dev)
146{
147 NV_DEBUG(dev, "\n");
148
149 nv_wr32(dev, 0x250c, 0x6f3cfc34);
150}
151
152static void
153nv50_fifo_init_regs(struct drm_device *dev)
154{
155 NV_DEBUG(dev, "\n");
156
157 nv_wr32(dev, 0x2500, 0);
158 nv_wr32(dev, 0x3250, 0);
159 nv_wr32(dev, 0x3220, 0);
160 nv_wr32(dev, 0x3204, 0);
161 nv_wr32(dev, 0x3210, 0);
162 nv_wr32(dev, 0x3270, 0);
163
164 /* Enable dummy channels setup by nv50_instmem.c */
165 nv50_fifo_channel_enable(dev, 0, true);
166 nv50_fifo_channel_enable(dev, 127, true);
167}
168
169int
170nv50_fifo_init(struct drm_device *dev)
171{
172 struct drm_nouveau_private *dev_priv = dev->dev_private;
173 struct nv50_fifo_priv *priv;
174 int ret;
175
176 NV_DEBUG(dev, "\n");
177
178 priv = dev_priv->engine.fifo.priv;
179 if (priv) {
180 priv->cur_thingo = !priv->cur_thingo;
181 goto just_reset;
182 }
183
184 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
185 if (!priv)
186 return -ENOMEM;
187 dev_priv->engine.fifo.priv = priv;
188
189 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
190 NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
191 if (ret) {
192 NV_ERROR(dev, "error creating thingo0: %d\n", ret);
193 return ret;
194 }
195
196 ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
197 NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
198 if (ret) {
199 NV_ERROR(dev, "error creating thingo1: %d\n", ret);
200 return ret;
201 }
202
203just_reset:
204 nv50_fifo_init_reset(dev);
205 nv50_fifo_init_intr(dev);
206 nv50_fifo_init_context_table(dev);
207 nv50_fifo_init_regs__nv(dev);
208 nv50_fifo_init_regs(dev);
209 dev_priv->engine.fifo.enable(dev);
210 dev_priv->engine.fifo.reassign(dev, true);
211
212 return 0;
213}
214
215void
216nv50_fifo_takedown(struct drm_device *dev)
217{
218 struct drm_nouveau_private *dev_priv = dev->dev_private;
219 struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
220
221 NV_DEBUG(dev, "\n");
222
223 if (!priv)
224 return;
225
226 nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
227 nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
228
229 dev_priv->engine.fifo.priv = NULL;
230 kfree(priv);
231}
232
233int
234nv50_fifo_channel_id(struct drm_device *dev)
235{
236 return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
237 NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
238}
239
240int
241nv50_fifo_create_context(struct nouveau_channel *chan)
242{
243 struct drm_device *dev = chan->dev;
244 struct drm_nouveau_private *dev_priv = dev->dev_private;
245 struct nouveau_gpuobj *ramfc = NULL;
246 int ret;
247
248 NV_DEBUG(dev, "ch%d\n", chan->id);
249
250 if (IS_G80) {
251 uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
252 uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
253
254 ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
255 0x100, NVOBJ_FLAG_ZERO_ALLOC |
256 NVOBJ_FLAG_ZERO_FREE, &ramfc,
257 &chan->ramfc);
258 if (ret)
259 return ret;
260
261 ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
262 ramin_voffset + 0x0400, 4096,
263 0, NULL, &chan->cache);
264 if (ret)
265 return ret;
266 } else {
267 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
268 NVOBJ_FLAG_ZERO_ALLOC |
269 NVOBJ_FLAG_ZERO_FREE,
270 &chan->ramfc);
271 if (ret)
272 return ret;
273 ramfc = chan->ramfc->gpuobj;
274
275 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 256,
276 0, &chan->cache);
277 if (ret)
278 return ret;
279 }
280
281 dev_priv->engine.instmem.prepare_access(dev, true);
282
283 nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
284 nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
285 nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
286 nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
287 nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
288 nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
289 nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
290 nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
291 nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
292 nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
293 nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
294
295 if (!IS_G80) {
296 nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
297 nv_wo32(dev, chan->ramin->gpuobj, 1,
298 chan->ramfc->instance >> 8);
299
300 nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
301 nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
302 }
303
304 dev_priv->engine.instmem.finish_access(dev);
305
306 ret = nv50_fifo_channel_enable(dev, chan->id, false);
307 if (ret) {
308 NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
309 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
310 return ret;
311 }
312
313 return 0;
314}
315
316void
317nv50_fifo_destroy_context(struct nouveau_channel *chan)
318{
319 struct drm_device *dev = chan->dev;
320
321 NV_DEBUG(dev, "ch%d\n", chan->id);
322
323 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
324 nouveau_gpuobj_ref_del(dev, &chan->cache);
325
326 nv50_fifo_channel_disable(dev, chan->id, false);
327
328 /* Dummy channel, also used on ch 127 */
329 if (chan->id == 0)
330 nv50_fifo_channel_disable(dev, 127, false);
331}
332
333int
334nv50_fifo_load_context(struct nouveau_channel *chan)
335{
336 struct drm_device *dev = chan->dev;
337 struct drm_nouveau_private *dev_priv = dev->dev_private;
338 struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
339 struct nouveau_gpuobj *cache = chan->cache->gpuobj;
340 int ptr, cnt;
341
342 NV_DEBUG(dev, "ch%d\n", chan->id);
343
344 dev_priv->engine.instmem.prepare_access(dev, false);
345
346 nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
347 nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
348 nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
349 nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
350 nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
351 nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
352 nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
353 nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
354 nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
355 nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
356 nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
357 nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
358 nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
359 nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
360 nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
361 nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
362 nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
363 nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
364 nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
365 nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
366 nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
367 nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
368 nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
369 nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
370 nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
371 nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
372 nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
373 nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
374 nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
375 nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
376 nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
377 nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
378 nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));
379
380 cnt = nv_ro32(dev, ramfc, 0x84/4);
381 for (ptr = 0; ptr < cnt; ptr++) {
382 nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
383 nv_ro32(dev, cache, (ptr * 2) + 0));
384 nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
385 nv_ro32(dev, cache, (ptr * 2) + 1));
386 }
387 nv_wr32(dev, 0x3210, cnt << 2);
388 nv_wr32(dev, 0x3270, 0);
389
390 /* guessing that all the 0x34xx regs aren't on NV50 */
391 if (!IS_G80) {
392 nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
393 nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
394 nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
395 nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
396 nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
397 }
398
399 dev_priv->engine.instmem.finish_access(dev);
400
401 nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
402 nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
403 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
404 return 0;
405}
406
407int
408nv50_fifo_unload_context(struct drm_device *dev)
409{
410 struct drm_nouveau_private *dev_priv = dev->dev_private;
411 struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
412 struct nouveau_gpuobj *ramfc, *cache;
413 struct nouveau_channel *chan = NULL;
414 int chid, get, put, ptr;
415
416 NV_DEBUG(dev, "\n");
417
418 chid = pfifo->channel_id(dev);
419 if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
420 return 0;
421
422 chan = dev_priv->fifos[chid];
423 if (!chan) {
424 NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
425 return -EINVAL;
426 }
427 NV_DEBUG(dev, "ch%d\n", chan->id);
428 ramfc = chan->ramfc->gpuobj;
429 cache = chan->cache->gpuobj;
430
431 dev_priv->engine.instmem.prepare_access(dev, true);
432
433 nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
434 nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
435 nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
436 nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
437 nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
438 nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
439 nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
440 nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
441 nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
442 nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
443 nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
444 nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
445 nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
446 nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
447 nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
448 nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
449 nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
450 nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
451 nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
452 nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
453 nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
454 nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
455 nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
456 nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
457 nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
458 nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
459 nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
460 nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
461 nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
462 nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
463 nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
464 nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
465 nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));
466
467 put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
468 get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
469 ptr = 0;
470 while (put != get) {
471 nv_wo32(dev, cache, ptr++,
472 nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
473 nv_wo32(dev, cache, ptr++,
474 nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
475 get = (get + 1) & 0x1ff;
476 }
477
478 /* guessing that all the 0x34xx regs aren't on NV50 */
479 if (!IS_G80) {
480 nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
481 nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
482 nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
483 nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
484 nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
485 nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
486 }
487
488 dev_priv->engine.instmem.finish_access(dev);
489
490 /*XXX: probably reload ch127 (NULL) state back too */
491 nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
492 return 0;
493}
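
The PUT/GET handling in the save loop above treats CACHE1 as a 512-entry method/data ring: the registers hold byte offsets, so they are shifted down by 2, and GET advances modulo 0x200 until it meets PUT. A standalone sketch of that index arithmetic (register values invented):

#include <stdio.h>

int main(void)
{
	unsigned put = (0x018 & 0x7ff) >> 2;	/* 6 */
	unsigned get = (0x7fc & 0x7ff) >> 2;	/* 511 */
	int saved = 0;

	while (put != get) {
		saved++;			/* one method/data pair */
		get = (get + 1) & 0x1ff;
	}
	printf("saved %d entries\n", saved);	/* 7 (wraps past 0) */
	return 0;
}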
494
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 000000000000..177d8229336f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,385 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31MODULE_FIRMWARE("nouveau/nv50.ctxprog");
32MODULE_FIRMWARE("nouveau/nv50.ctxvals");
33MODULE_FIRMWARE("nouveau/nv84.ctxprog");
34MODULE_FIRMWARE("nouveau/nv84.ctxvals");
35MODULE_FIRMWARE("nouveau/nv86.ctxprog");
36MODULE_FIRMWARE("nouveau/nv86.ctxvals");
37MODULE_FIRMWARE("nouveau/nv92.ctxprog");
38MODULE_FIRMWARE("nouveau/nv92.ctxvals");
39MODULE_FIRMWARE("nouveau/nv94.ctxprog");
40MODULE_FIRMWARE("nouveau/nv94.ctxvals");
41MODULE_FIRMWARE("nouveau/nv96.ctxprog");
42MODULE_FIRMWARE("nouveau/nv96.ctxvals");
43MODULE_FIRMWARE("nouveau/nv98.ctxprog");
44MODULE_FIRMWARE("nouveau/nv98.ctxvals");
45MODULE_FIRMWARE("nouveau/nva0.ctxprog");
46MODULE_FIRMWARE("nouveau/nva0.ctxvals");
47MODULE_FIRMWARE("nouveau/nva5.ctxprog");
48MODULE_FIRMWARE("nouveau/nva5.ctxvals");
49MODULE_FIRMWARE("nouveau/nva8.ctxprog");
50MODULE_FIRMWARE("nouveau/nva8.ctxvals");
51MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
52MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
53MODULE_FIRMWARE("nouveau/nvac.ctxprog");
54MODULE_FIRMWARE("nouveau/nvac.ctxvals");
55
56#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
57
58static void
59nv50_graph_init_reset(struct drm_device *dev)
60{
61 uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
62
63 NV_DEBUG(dev, "\n");
64
65 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
66 nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | pmc_e);
67}
68
69static void
70nv50_graph_init_intr(struct drm_device *dev)
71{
72 NV_DEBUG(dev, "\n");
73
74 nv_wr32(dev, NV03_PGRAPH_INTR, 0xffffffff);
75 nv_wr32(dev, 0x400138, 0xffffffff);
76 nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xffffffff);
77}
78
79static void
80nv50_graph_init_regs__nv(struct drm_device *dev)
81{
82 NV_DEBUG(dev, "\n");
83
84 nv_wr32(dev, 0x400804, 0xc0000000);
85 nv_wr32(dev, 0x406800, 0xc0000000);
86 nv_wr32(dev, 0x400c04, 0xc0000000);
87 nv_wr32(dev, 0x401804, 0xc0000000);
88 nv_wr32(dev, 0x405018, 0xc0000000);
89 nv_wr32(dev, 0x402000, 0xc0000000);
90
91 nv_wr32(dev, 0x400108, 0xffffffff);
92
93 nv_wr32(dev, 0x400824, 0x00004000);
94 nv_wr32(dev, 0x400500, 0x00010001);
95}
96
97static void
98nv50_graph_init_regs(struct drm_device *dev)
99{
100 NV_DEBUG(dev, "\n");
101
102 nv_wr32(dev, NV04_PGRAPH_DEBUG_3,
103 (1 << 2) /* HW_CONTEXT_SWITCH_ENABLED */);
104 nv_wr32(dev, 0x402ca8, 0x800);
105}
106
107static int
108nv50_graph_init_ctxctl(struct drm_device *dev)
109{
110 NV_DEBUG(dev, "\n");
111
112 nv40_grctx_init(dev);
113
114 nv_wr32(dev, 0x400320, 4);
115 nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
116 nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
117 return 0;
118}
119
120int
121nv50_graph_init(struct drm_device *dev)
122{
123 int ret;
124
125 NV_DEBUG(dev, "\n");
126
127 nv50_graph_init_reset(dev);
128 nv50_graph_init_regs__nv(dev);
129 nv50_graph_init_regs(dev);
130 nv50_graph_init_intr(dev);
131
132 ret = nv50_graph_init_ctxctl(dev);
133 if (ret)
134 return ret;
135
136 return 0;
137}
138
139void
140nv50_graph_takedown(struct drm_device *dev)
141{
142 NV_DEBUG(dev, "\n");
143 nv40_grctx_fini(dev);
144}
145
146void
147nv50_graph_fifo_access(struct drm_device *dev, bool enabled)
148{
149 const uint32_t mask = 0x00010001;
150
151 if (enabled)
152 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | mask);
153 else
154 nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) & ~mask);
155}
156
157struct nouveau_channel *
158nv50_graph_channel(struct drm_device *dev)
159{
160 struct drm_nouveau_private *dev_priv = dev->dev_private;
161 uint32_t inst;
162 int i;
163
164 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
165 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
166 return NULL;
167 inst = (inst & NV50_PGRAPH_CTXCTL_CUR_INSTANCE) << 12;
168
169 for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
170 struct nouveau_channel *chan = dev_priv->fifos[i];
171
172 if (chan && chan->ramin && chan->ramin->instance == inst)
173 return chan;
174 }
175
176 return NULL;
177}
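
The lookup above matches channels by instance address: CTXCTL_CUR is assumed to carry the current context's instance in 4 KiB units plus a "loaded" flag in the top bit, hence the << 12 before comparing against each channel's ramin instance. A sketch of that decode (mask values inferred from the register names, register value invented):

#include <stdio.h>

int main(void)
{
	unsigned cur = 0x80000123;	/* hypothetical CTXCTL_CUR */
	unsigned loaded = cur & 0x80000000u;
	unsigned inst = (cur & 0x000fffffu) << 12;

	printf("loaded=%d inst=0x%08x\n", !!loaded, inst);
	return 0;
}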
178
179int
180nv50_graph_create_context(struct nouveau_channel *chan)
181{
182 struct drm_device *dev = chan->dev;
183 struct drm_nouveau_private *dev_priv = dev->dev_private;
184 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
185 struct nouveau_gpuobj *ctx;
186 uint32_t grctx_size = 0x70000;
187 int hdr, ret;
188
189 NV_DEBUG(dev, "ch%d\n", chan->id);
190
191 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
192 NVOBJ_FLAG_ZERO_ALLOC |
193 NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
194 if (ret)
195 return ret;
196 ctx = chan->ramin_grctx->gpuobj;
197
198 hdr = IS_G80 ? 0x200 : 0x20;
199 dev_priv->engine.instmem.prepare_access(dev, true);
200 nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
201 nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
202 grctx_size - 1);
203 nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
204 nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
205 nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
206 nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
207 dev_priv->engine.instmem.finish_access(dev);
208
209 dev_priv->engine.instmem.prepare_access(dev, true);
210 nv40_grctx_vals_load(dev, ctx);
211 nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
212 if ((dev_priv->chipset & 0xf0) == 0xa0)
213 nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
214 else
215 nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
216 dev_priv->engine.instmem.finish_access(dev);
217
218 return 0;
219}
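/* Aside (illustration, not part of the committed code): a hedged reading of
 * the six header words written above.  The 0x00190002 word plus the
 * limit/base pair that follows suggests an NV50 ctxdma-style descriptor:
 *
 *   hdr + 0x00: 0x00190002                  class/flags word
 *   hdr + 0x04: instance + grctx_size - 1   limit (last valid byte)
 *   hdr + 0x08: instance                    base address
 *   hdr + 0x0c, 0x10: 0                     unused here
 *   hdr + 0x14: 0x00010000                  unknown flag
 *
 * The descriptor interpretation is an inference from the values written,
 * not from documentation.
 */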
220
221void
222nv50_graph_destroy_context(struct nouveau_channel *chan)
223{
224 struct drm_device *dev = chan->dev;
225 struct drm_nouveau_private *dev_priv = dev->dev_private;
226 int i, hdr = IS_G80 ? 0x200 : 0x20;
227
228 NV_DEBUG(dev, "ch%d\n", chan->id);
229
230 if (!chan->ramin || !chan->ramin->gpuobj)
231 return;
232
233 dev_priv->engine.instmem.prepare_access(dev, true);
234 for (i = hdr; i < hdr + 24; i += 4)
235 nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
236 dev_priv->engine.instmem.finish_access(dev);
237
238 nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
239}
240
241static int
242nv50_graph_do_load_context(struct drm_device *dev, uint32_t inst)
243{
244 uint32_t fifo = nv_rd32(dev, 0x400500);
245
246 nv_wr32(dev, 0x400500, fifo & ~1);
247 nv_wr32(dev, 0x400784, inst);
248 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x40);
249 nv_wr32(dev, 0x400320, nv_rd32(dev, 0x400320) | 0x11);
250 nv_wr32(dev, 0x400040, 0xffffffff);
251 (void)nv_rd32(dev, 0x400040);
252 nv_wr32(dev, 0x400040, 0x00000000);
253 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 1);
254
255 if (nouveau_wait_for_idle(dev))
256 nv_wr32(dev, 0x40032c, inst | (1<<31));
257 nv_wr32(dev, 0x400500, fifo);
258
259 return 0;
260}
261
262int
263nv50_graph_load_context(struct nouveau_channel *chan)
264{
265 uint32_t inst = chan->ramin->instance >> 12;
266
267 NV_DEBUG(chan->dev, "ch%d\n", chan->id);
268 return nv50_graph_do_load_context(chan->dev, inst);
269}
270
271int
272nv50_graph_unload_context(struct drm_device *dev)
273{
274 uint32_t inst, fifo = nv_rd32(dev, 0x400500);
275
276 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_CUR);
277 if (!(inst & NV50_PGRAPH_CTXCTL_CUR_LOADED))
278 return 0;
279 inst &= NV50_PGRAPH_CTXCTL_CUR_INSTANCE;
280
281 nv_wr32(dev, 0x400500, fifo & ~1);
282 nv_wr32(dev, 0x400784, inst);
283 nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) | 0x20);
284 nv_wr32(dev, 0x400304, nv_rd32(dev, 0x400304) | 0x01);
285 nouveau_wait_for_idle(dev);
286 nv_wr32(dev, 0x400500, fifo);
287
288 nv_wr32(dev, NV50_PGRAPH_CTXCTL_CUR, inst);
289 return 0;
290}
291
292void
293nv50_graph_context_switch(struct drm_device *dev)
294{
295 uint32_t inst;
296
297 nv50_graph_unload_context(dev);
298
299 inst = nv_rd32(dev, NV50_PGRAPH_CTXCTL_NEXT);
300 inst &= NV50_PGRAPH_CTXCTL_NEXT_INSTANCE;
301 nv50_graph_do_load_context(dev, inst);
302
303 nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
304 NV40_PGRAPH_INTR_EN) | NV_PGRAPH_INTR_CONTEXT_SWITCH);
305}
306
307static int
308nv50_graph_nvsw_dma_vblsem(struct nouveau_channel *chan, int grclass,
309 int mthd, uint32_t data)
310{
311 struct nouveau_gpuobj_ref *ref = NULL;
312
313 if (nouveau_gpuobj_ref_find(chan, data, &ref))
314 return -ENOENT;
315
316 if (nouveau_notifier_offset(ref->gpuobj, NULL))
317 return -EINVAL;
318
319 chan->nvsw.vblsem = ref->gpuobj;
320 chan->nvsw.vblsem_offset = ~0;
321 return 0;
322}
323
324static int
325nv50_graph_nvsw_vblsem_offset(struct nouveau_channel *chan, int grclass,
326 int mthd, uint32_t data)
327{
328 if (nouveau_notifier_offset(chan->nvsw.vblsem, &data))
329 return -ERANGE;
330
331 chan->nvsw.vblsem_offset = data >> 2;
332 return 0;
333}
334
335static int
336nv50_graph_nvsw_vblsem_release_val(struct nouveau_channel *chan, int grclass,
337 int mthd, uint32_t data)
338{
339 chan->nvsw.vblsem_rval = data;
340 return 0;
341}
342
343static int
344nv50_graph_nvsw_vblsem_release(struct nouveau_channel *chan, int grclass,
345 int mthd, uint32_t data)
346{
347 struct drm_device *dev = chan->dev;
348 struct drm_nouveau_private *dev_priv = dev->dev_private;
349
350 if (!chan->nvsw.vblsem || chan->nvsw.vblsem_offset == ~0 || data > 1)
351 return -EINVAL;
352
353 if (!(nv_rd32(dev, NV50_PDISPLAY_INTR_EN) &
354 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data))) {
355 nv_wr32(dev, NV50_PDISPLAY_INTR_1,
356 NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(data));
357 nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
358 NV50_PDISPLAY_INTR_EN) |
359 NV50_PDISPLAY_INTR_EN_VBLANK_CRTC_(data));
360 }
361
362 list_add(&chan->nvsw.vbl_wait, &dev_priv->vbl_waiting);
363 return 0;
364}
365
366static struct nouveau_pgraph_object_method nv50_graph_nvsw_methods[] = {
367 { 0x018c, nv50_graph_nvsw_dma_vblsem },
368 { 0x0400, nv50_graph_nvsw_vblsem_offset },
369 { 0x0404, nv50_graph_nvsw_vblsem_release_val },
370 { 0x0408, nv50_graph_nvsw_vblsem_release },
371 {}
372};
373
374struct nouveau_pgraph_object_class nv50_graph_grclass[] = {
375 { 0x506e, true, nv50_graph_nvsw_methods }, /* nvsw */
376 { 0x0030, false, NULL }, /* null */
377 { 0x5039, false, NULL }, /* m2mf */
378 { 0x502d, false, NULL }, /* 2d */
379 { 0x50c0, false, NULL }, /* compute */
380 { 0x5097, false, NULL }, /* tesla (nv50) */
381 { 0x8297, false, NULL }, /* tesla (nv80/nv90) */
382 { 0x8397, false, NULL }, /* tesla (nva0) */
383 { 0x8597, false, NULL }, /* tesla (nva8) */
384 {}
385};
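/* Aside (illustration, not part of the committed code): the two tables
 * above pair a graphics class with an optional software-method table; the
 * nvsw class 0x506e carries the vblank-semaphore protocol (set the DMA
 * object via 0x018c, the offset via 0x0400, the release value via 0x0404,
 * then queue the wait with 0x0408).  A minimal sketch of how a dispatcher
 * could resolve (class, mthd) against these tables -- the struct field
 * names (id, exec) are assumptions, and this helper is not the driver's
 * actual dispatch path:
 */
#if 0
static int
nv50_graph_sw_mthd_sketch(struct nouveau_channel *chan, int grclass,
			  int mthd, uint32_t data)
{
	struct nouveau_pgraph_object_class *cls = nv50_graph_grclass;
	struct nouveau_pgraph_object_method *m;

	for (; cls->id; cls++) {
		if (cls->id != grclass || !cls->methods)
			continue;
		for (m = cls->methods; m->exec; m++) {
			if (m->id == mthd)
				return m->exec(chan, grclass, mthd, data);
		}
	}
	return -ENOENT; /* no software handler for this (class, method) */
}
#endif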
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 000000000000..94400f777e7f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,509 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 *
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial
16 * portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 */
27
28#include "drmP.h"
29#include "drm.h"
30#include "nouveau_drv.h"
31
32struct nv50_instmem_priv {
33 uint32_t save1700[5]; /* 0x1700->0x1710 */
34
35 struct nouveau_gpuobj_ref *pramin_pt;
36 struct nouveau_gpuobj_ref *pramin_bar;
37 struct nouveau_gpuobj_ref *fb_bar;
38
39 bool last_access_wr;
40};
41
42#define NV50_INSTMEM_PAGE_SHIFT 12
43#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
44#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
45
46/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
47 */
48#define BAR0_WI32(g, o, v) do { \
49 uint32_t offset; \
50 if ((g)->im_backing) { \
51 offset = (g)->im_backing_start; \
52 } else { \
53 offset = chan->ramin->gpuobj->im_backing_start; \
54 offset += (g)->im_pramin->start; \
55 } \
56 offset += (o); \
57 nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v)); \
58} while (0)
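/* Aside (illustration, not part of the committed code): BAR0_WI32()
 * resolves a gpuobj-relative offset to its VRAM address and writes through
 * the 1MiB PRAMIN window that register 0x1700 currently points at -- hence
 * the "& 0xfffff" and the NOTE above.  Example shape of a call, as used in
 * nv50_instmem_init() below:
 *
 *	BAR0_WI32(chan->vm_pd, 0x00, 0x00000000);
 *
 * Note the macro silently relies on "dev" and "chan" variables being in
 * scope at the call site.
 */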
59
60int
61nv50_instmem_init(struct drm_device *dev)
62{
63 struct drm_nouveau_private *dev_priv = dev->dev_private;
64 struct nouveau_channel *chan;
65 uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
66 struct nv50_instmem_priv *priv;
67 int ret, i;
68 uint32_t v, save_nv001700;
69
70 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
71 if (!priv)
72 return -ENOMEM;
73 dev_priv->engine.instmem.priv = priv;
74
75 /* Save state, will restore at takedown. */
76 for (i = 0x1700; i <= 0x1710; i += 4)
77 priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
78
 79 /* Reserve the last MiB of VRAM; we should probably try to avoid
80 * setting up the below tables over the top of the VBIOS image at
81 * some point.
82 */
83 dev_priv->ramin_rsvd_vram = 1 << 20;
84 c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
85 c_size = 128 << 10;
86 c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
87 c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
88 c_base = c_vmpd + 0x4000;
89 pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);
90
91 NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
92 NV_DEBUG(dev, " VBIOS image: 0x%08x\n",
93 (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
94 NV_DEBUG(dev, " Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
95 NV_DEBUG(dev, " PT size: %d KiB\n", pt_size >> 10);
96
97 /* Determine VM layout, we need to do this first to make sure
98 * we allocate enough memory for all the page tables.
99 */
100 dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
101 dev_priv->vm_gart_size = NV50_VM_BLOCK;
102
103 dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
104 dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
105 if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
106 dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
107 dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
108 dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;
109
110 dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;
111
112 NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
113 dev_priv->vm_gart_base,
114 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
115 NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
116 dev_priv->vm_vram_base,
117 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
118
119 c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);
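/* Aside (illustration, not part of the committed code): rough numbers for
 * the sizing above, assuming NV50_VM_BLOCK is 512MiB (the value this
 * driver uses elsewhere -- treated as an assumption here).  One page table
 * covers one block at 64KiB granularity, so it costs
 * (512MiB / 64KiB) * 8 bytes = 64KiB of PRAMIN; a board with 512MiB of
 * VRAM therefore adds a single 64KiB PT on top of the 128KiB channel
 * block reserved above.
 */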
120
121 /* Map BAR0 PRAMIN aperture over the memory we want to use */
122 save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
123 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
124
125 /* Create a fake channel, and use it as our "dummy" channels 0/127.
126 * The main reason for creating a channel is so we can use the gpuobj
 127 * code. However, it's probably worth noting that NVIDIA also set up
128 * their channels 0/127 with the same values they configure here.
129 * So, there may be some other reason for doing this.
130 *
131 * Have to create the entire channel manually, as the real channel
132 * creation code assumes we have PRAMIN access, and we don't until
133 * we're done here.
134 */
135 chan = kzalloc(sizeof(*chan), GFP_KERNEL);
136 if (!chan)
137 return -ENOMEM;
138 chan->id = 0;
139 chan->dev = dev;
140 chan->file_priv = (struct drm_file *)-2;
141 dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
142
143 /* Channel's PRAMIN object + heap */
144 ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
145 NULL, &chan->ramin);
146 if (ret)
147 return ret;
148
149 if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
150 return -ENOMEM;
151
152 /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
153 ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
154 0x4000, 0, NULL, &chan->ramfc);
155 if (ret)
156 return ret;
157
158 for (i = 0; i < c_vmpd; i += 4)
159 BAR0_WI32(chan->ramin->gpuobj, i, 0);
160
161 /* VM page directory */
162 ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
163 0x4000, 0, &chan->vm_pd, NULL);
164 if (ret)
165 return ret;
166 for (i = 0; i < 0x4000; i += 8) {
167 BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
168 BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
169 }
170
171 /* PRAMIN page table, cheat and map into VM at 0x0000000000.
172 * We map the entire fake channel into the start of the PRAMIN BAR
173 */
174 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
175 0, &priv->pramin_pt);
176 if (ret)
177 return ret;
178
179 for (i = 0, v = c_offset; i < pt_size; i += 8, v += 0x1000) {
180 if (v < (c_offset + c_size))
181 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
182 else
183 BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
184 BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
185 }
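/* Aside (illustration, not part of the committed code): reading the loop
 * above, PTE bit 0 appears to mark a present 4KiB page at VRAM address v,
 * while 0x00000009 is written for everything outside the reserved block
 * (and again in nv50_instmem_unbind()), presumably an invalid/fault
 * encoding.  Inferred from usage, not from documentation.
 */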
186
187 BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
188 BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
189
190 /* VRAM page table(s), mapped into VM at +1GiB */
191 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
192 ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
193 NV50_VM_BLOCK/65536*8, 0, 0,
194 &chan->vm_vram_pt[i]);
195 if (ret) {
196 NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
197 ret);
198 dev_priv->vm_vram_pt_nr = i;
199 return ret;
200 }
201 dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;
202
203 for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
204 v += 4)
205 BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);
206
207 BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
208 chan->vm_vram_pt[i]->instance | 0x61);
209 BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
210 }
211
212 /* DMA object for PRAMIN BAR */
213 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
214 &priv->pramin_bar);
215 if (ret)
216 return ret;
217 BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
218 BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
219 BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
220 BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
221 BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
222 BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
223
224 /* DMA object for FB BAR */
225 ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
226 &priv->fb_bar);
227 if (ret)
228 return ret;
229 BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
230 BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
231 drm_get_resource_len(dev, 1) - 1);
232 BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
233 BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
234 BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
235 BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);
236
237 /* Poke the relevant regs, and pray it works :) */
238 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
239 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
240 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
241 NV50_PUNK_BAR_CFG_BASE_VALID);
242 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
243 NV50_PUNK_BAR1_CTXDMA_VALID);
244 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
245 NV50_PUNK_BAR3_CTXDMA_VALID);
246
247 for (i = 0; i < 8; i++)
248 nv_wr32(dev, 0x1900 + (i*4), 0);
249
 250 /* Assume that praying isn't enough; check that we can re-read the
251 * entire fake channel back from the PRAMIN BAR */
252 dev_priv->engine.instmem.prepare_access(dev, false);
253 for (i = 0; i < c_size; i += 4) {
254 if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
255 NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
256 i);
257 dev_priv->engine.instmem.finish_access(dev);
258 return -EINVAL;
259 }
260 }
261 dev_priv->engine.instmem.finish_access(dev);
262
263 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
264
265 /* Global PRAMIN heap */
266 if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
267 c_size, dev_priv->ramin_size - c_size)) {
268 dev_priv->ramin_heap = NULL;
269 NV_ERROR(dev, "Failed to init RAMIN heap\n");
270 }
271
272 /*XXX: incorrect, but needed to make hash func "work" */
273 dev_priv->ramht_offset = 0x10000;
274 dev_priv->ramht_bits = 9;
275 dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
276 return 0;
277}
278
279void
280nv50_instmem_takedown(struct drm_device *dev)
281{
282 struct drm_nouveau_private *dev_priv = dev->dev_private;
283 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
284 struct nouveau_channel *chan = dev_priv->fifos[0];
285 int i;
286
287 NV_DEBUG(dev, "\n");
288
289 if (!priv)
290 return;
291
292 /* Restore state from before init */
293 for (i = 0x1700; i <= 0x1710; i += 4)
294 nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
295
296 nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
297 nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
298 nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
299
300 /* Destroy dummy channel */
301 if (chan) {
302 for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
303 nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
304 dev_priv->vm_vram_pt[i] = NULL;
305 }
306 dev_priv->vm_vram_pt_nr = 0;
307
308 nouveau_gpuobj_del(dev, &chan->vm_pd);
309 nouveau_gpuobj_ref_del(dev, &chan->ramfc);
310 nouveau_gpuobj_ref_del(dev, &chan->ramin);
311 nouveau_mem_takedown(&chan->ramin_heap);
312
313 dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
314 kfree(chan);
315 }
316
317 dev_priv->engine.instmem.priv = NULL;
318 kfree(priv);
319}
320
321int
322nv50_instmem_suspend(struct drm_device *dev)
323{
324 struct drm_nouveau_private *dev_priv = dev->dev_private;
325 struct nouveau_channel *chan = dev_priv->fifos[0];
326 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
327 int i;
328
329 ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
330 if (!ramin->im_backing_suspend)
331 return -ENOMEM;
332
333 for (i = 0; i < ramin->im_pramin->size; i += 4)
334 ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
335 return 0;
336}
337
338void
339nv50_instmem_resume(struct drm_device *dev)
340{
341 struct drm_nouveau_private *dev_priv = dev->dev_private;
342 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
343 struct nouveau_channel *chan = dev_priv->fifos[0];
344 struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
345 int i;
346
347 nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
348 for (i = 0; i < ramin->im_pramin->size; i += 4)
349 BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
350 vfree(ramin->im_backing_suspend);
351 ramin->im_backing_suspend = NULL;
352
353 /* Poke the relevant regs, and pray it works :) */
354 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
355 nv_wr32(dev, NV50_PUNK_UNK1710, 0);
356 nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
357 NV50_PUNK_BAR_CFG_BASE_VALID);
358 nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
359 NV50_PUNK_BAR1_CTXDMA_VALID);
360 nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
361 NV50_PUNK_BAR3_CTXDMA_VALID);
362
363 for (i = 0; i < 8; i++)
364 nv_wr32(dev, 0x1900 + (i*4), 0);
365}
366
367int
368nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
369 uint32_t *sz)
370{
371 int ret;
372
373 if (gpuobj->im_backing)
374 return -EINVAL;
375
376 *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
377 if (*sz == 0)
378 return -EINVAL;
379
380 ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
381 true, false, &gpuobj->im_backing);
382 if (ret) {
383 NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
384 return ret;
385 }
386
387 ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
388 if (ret) {
389 NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
390 nouveau_bo_ref(NULL, &gpuobj->im_backing);
391 return ret;
392 }
393
394 gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
395 gpuobj->im_backing_start <<= PAGE_SHIFT;
396
397 return 0;
398}
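/* Aside (illustration, not part of the committed code): the *sz update
 * above is the usual power-of-two round-up.  With a 4KiB instmem page,
 * *sz = 0x1234 becomes (0x1234 + 0xfff) & ~0xfff = 0x2000, i.e. two pages.
 */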
399
400void
401nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
402{
403 struct drm_nouveau_private *dev_priv = dev->dev_private;
404
405 if (gpuobj && gpuobj->im_backing) {
406 if (gpuobj->im_bound)
407 dev_priv->engine.instmem.unbind(dev, gpuobj);
408 nouveau_bo_unpin(gpuobj->im_backing);
409 nouveau_bo_ref(NULL, &gpuobj->im_backing);
410 gpuobj->im_backing = NULL;
411 }
412}
413
414int
415nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
416{
417 struct drm_nouveau_private *dev_priv = dev->dev_private;
418 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
419 uint32_t pte, pte_end, vram;
420
421 if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
422 return -EINVAL;
423
424 NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
425 gpuobj->im_pramin->start, gpuobj->im_pramin->size);
426
427 pte = (gpuobj->im_pramin->start >> 12) << 3;
428 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
429 vram = gpuobj->im_backing_start;
430
431 NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
432 gpuobj->im_pramin->start, pte, pte_end);
433 NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
434
435 dev_priv->engine.instmem.prepare_access(dev, true);
436 while (pte < pte_end) {
437 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
438 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
439
440 pte += 8;
441 vram += NV50_INSTMEM_PAGE_SIZE;
442 }
443 dev_priv->engine.instmem.finish_access(dev);
444
445 nv_wr32(dev, 0x100c80, 0x00040001);
446 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
447 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
448 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
449 return -EBUSY;
450 }
451
452 nv_wr32(dev, 0x100c80, 0x00060001);
453 if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
454 NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
455 NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
456 return -EBUSY;
457 }
458
459 gpuobj->im_bound = 1;
460 return 0;
461}
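/* Aside (illustration, not part of the committed code): worked example of
 * the PTE arithmetic above.  Each 4KiB PRAMIN page has one 8-byte PTE, so
 * an object with im_pramin->start = 0x10000 and size 0x3000 gives
 * pte = (0x10000 >> 12) << 3 = 0x80 and pte_end = 0x80 + 0x18: PTEs 16..18
 * are rewritten.  The two 0x100c80 pokes afterwards look like TLB flushes,
 * inferred from the write-then-wait pattern rather than documentation.
 */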
462
463int
464nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
465{
466 struct drm_nouveau_private *dev_priv = dev->dev_private;
467 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
468 uint32_t pte, pte_end;
469
470 if (gpuobj->im_bound == 0)
471 return -EINVAL;
472
473 pte = (gpuobj->im_pramin->start >> 12) << 3;
474 pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
475
476 dev_priv->engine.instmem.prepare_access(dev, true);
477 while (pte < pte_end) {
478 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
479 nv_wo32(dev, priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
480 pte += 8;
481 }
482 dev_priv->engine.instmem.finish_access(dev);
483
484 gpuobj->im_bound = 0;
485 return 0;
486}
487
488void
489nv50_instmem_prepare_access(struct drm_device *dev, bool write)
490{
491 struct drm_nouveau_private *dev_priv = dev->dev_private;
492 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
493
494 priv->last_access_wr = write;
495}
496
497void
498nv50_instmem_finish_access(struct drm_device *dev)
499{
500 struct drm_nouveau_private *dev_priv = dev->dev_private;
501 struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
502
503 if (priv->last_access_wr) {
504 nv_wr32(dev, 0x070000, 0x00000001);
505 if (!nv_wait(0x070000, 0x00000001, 0x00000000))
506 NV_ERROR(dev, "PRAMIN flush timeout\n");
507 }
508}
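/* Aside (illustration, not part of the committed code): nv_wait(reg, mask,
 * val) polls until (nv_rd32(dev, reg) & mask) == val and returns false on
 * timeout, so the flush above is "write 1 to 0x70000, wait for bit 0 to
 * clear".  A minimal sketch of that polling shape; the timeout length and
 * helper name are assumptions, not the driver's implementation:
 */
#if 0
static bool
poll_until_clear_sketch(struct drm_device *dev, uint32_t reg, uint32_t mask)
{
	unsigned long timeout = jiffies + 2 * HZ;	/* assumed timeout */

	while (time_before(jiffies, timeout)) {
		if (!(nv_rd32(dev, reg) & mask))
			return true;	/* bits cleared within the window */
		cpu_relax();
	}
	return false;	/* caller reports a flush timeout */
}
#endif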
509
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 000000000000..e0a9c3faa202
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) 2007 Ben Skeggs.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm.h"
29#include "nouveau_drv.h"
30
31int
32nv50_mc_init(struct drm_device *dev)
33{
34 nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
35 return 0;
36}
37
38void nv50_mc_takedown(struct drm_device *dev)
39{
40}
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
new file mode 100644
index 000000000000..8c280463a664
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -0,0 +1,309 @@
1/*
2 * Copyright (C) 2008 Maarten Maathuis.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#include "drmP.h"
28#include "drm_crtc_helper.h"
29
30#define NOUVEAU_DMA_DEBUG (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
31#include "nouveau_reg.h"
32#include "nouveau_drv.h"
33#include "nouveau_dma.h"
34#include "nouveau_encoder.h"
35#include "nouveau_connector.h"
36#include "nouveau_crtc.h"
37#include "nv50_display.h"
38
39static void
40nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
41{
42 struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
43 struct drm_nouveau_private *dev_priv = dev->dev_private;
44 struct nouveau_channel *evo = dev_priv->evo;
45 int ret;
46
47 NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
48
49 ret = RING_SPACE(evo, 2);
50 if (ret) {
51 NV_ERROR(dev, "no space while disconnecting SOR\n");
52 return;
53 }
54 BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
55 OUT_RING(evo, 0);
56}
57
58static void
59nv50_sor_dp_link_train(struct drm_encoder *encoder)
60{
61 struct drm_device *dev = encoder->dev;
62 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
63 struct bit_displayport_encoder_table *dpe;
64 int dpe_headerlen;
65
66 dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
67 if (!dpe) {
68 NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
69 return;
70 }
71
72 if (dpe->script0) {
73 NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
74 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
75 nv_encoder->dcb);
76 }
77
78 if (!nouveau_dp_link_train(encoder))
79 NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
80
81 if (dpe->script1) {
82 NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
83 nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
84 nv_encoder->dcb);
85 }
86}
87
88static void
89nv50_sor_dpms(struct drm_encoder *encoder, int mode)
90{
91 struct drm_device *dev = encoder->dev;
92 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
93 uint32_t val;
94 int or = nv_encoder->or;
95
96 NV_DEBUG(dev, "or %d mode %d\n", or, mode);
97
98 /* wait for it to be done */
99 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
100 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING, 0)) {
101 NV_ERROR(dev, "timeout: SOR_DPMS_CTRL_PENDING(%d) == 0\n", or);
102 NV_ERROR(dev, "SOR_DPMS_CTRL(%d) = 0x%08x\n", or,
103 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or)));
104 }
105
106 val = nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or));
107
108 if (mode == DRM_MODE_DPMS_ON)
109 val |= NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
110 else
111 val &= ~NV50_PDISPLAY_SOR_DPMS_CTRL_ON;
112
113 nv_wr32(dev, NV50_PDISPLAY_SOR_DPMS_CTRL(or), val |
114 NV50_PDISPLAY_SOR_DPMS_CTRL_PENDING);
115 if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(or),
116 NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
117 NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", or);
118 NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", or,
119 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
120 }
121
122 if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
123 nv50_sor_dp_link_train(encoder);
124}
125
126static void
127nv50_sor_save(struct drm_encoder *encoder)
128{
129 NV_ERROR(encoder->dev, "!!\n");
130}
131
132static void
133nv50_sor_restore(struct drm_encoder *encoder)
134{
135 NV_ERROR(encoder->dev, "!!\n");
136}
137
138static bool
139nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
140 struct drm_display_mode *adjusted_mode)
141{
142 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
143 struct nouveau_connector *connector;
144
145 NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
146
147 connector = nouveau_encoder_connector_get(nv_encoder);
148 if (!connector) {
149 NV_ERROR(encoder->dev, "Encoder has no connector\n");
150 return false;
151 }
152
153 if (connector->scaling_mode != DRM_MODE_SCALE_NONE &&
154 connector->native_mode) {
155 int id = adjusted_mode->base.id;
156 *adjusted_mode = *connector->native_mode;
157 adjusted_mode->base.id = id;
158 }
159
160 return true;
161}
162
163static void
164nv50_sor_prepare(struct drm_encoder *encoder)
165{
166}
167
168static void
169nv50_sor_commit(struct drm_encoder *encoder)
170{
171}
172
173static void
174nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
175 struct drm_display_mode *adjusted_mode)
176{
177 struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
178 struct nouveau_channel *evo = dev_priv->evo;
179 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
180 struct drm_device *dev = encoder->dev;
181 struct nouveau_crtc *crtc = nouveau_crtc(encoder->crtc);
182 uint32_t mode_ctl = 0;
183 int ret;
184
185 NV_DEBUG(dev, "or %d\n", nv_encoder->or);
186
187 nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
188
189 switch (nv_encoder->dcb->type) {
190 case OUTPUT_TMDS:
191 if (nv_encoder->dcb->sorconf.link & 1) {
192 if (adjusted_mode->clock < 165000)
193 mode_ctl = 0x0100;
194 else
195 mode_ctl = 0x0500;
196 } else
197 mode_ctl = 0x0200;
198 break;
199 case OUTPUT_DP:
200 mode_ctl |= 0x00050000;
201 if (nv_encoder->dcb->sorconf.link & 1)
202 mode_ctl |= 0x00000800;
203 else
204 mode_ctl |= 0x00000900;
205 break;
206 default:
207 break;
208 }
209
210 if (crtc->index == 1)
211 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC1;
212 else
213 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
214
215 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
216 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
217
218 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
219 mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
220
221 ret = RING_SPACE(evo, 2);
222 if (ret) {
223 NV_ERROR(dev, "no space while connecting SOR\n");
224 return;
225 }
226 BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
227 OUT_RING(evo, mode_ctl);
228}
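/* Aside (illustration, not part of the committed code): mode_ctl values
 * produced by the switch above, read directly off the code (the meanings
 * are inferred):
 *
 *   TMDS, sorconf.link bit 0 set:   0x0100 (<165MHz) or 0x0500 (dual link)
 *   TMDS, sorconf.link bit 0 clear: 0x0200
 *   DP:                             0x00050000 | 0x0800 or 0x0900
 *
 * ORed with the CRTC0/CRTC1 select and the NHSYNC/NVSYNC flags before
 * being pushed to the EVO channel.
 */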
229
230static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
231 .dpms = nv50_sor_dpms,
232 .save = nv50_sor_save,
233 .restore = nv50_sor_restore,
234 .mode_fixup = nv50_sor_mode_fixup,
235 .prepare = nv50_sor_prepare,
236 .commit = nv50_sor_commit,
237 .mode_set = nv50_sor_mode_set,
238 .detect = NULL
239};
240
241static void
242nv50_sor_destroy(struct drm_encoder *encoder)
243{
244 struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
245
246 if (!encoder)
247 return;
248
249 NV_DEBUG(encoder->dev, "\n");
250
251 drm_encoder_cleanup(encoder);
252
253 kfree(nv_encoder);
254}
255
256static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
257 .destroy = nv50_sor_destroy,
258};
259
260int
261nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
262{
263 struct nouveau_encoder *nv_encoder = NULL;
264 struct drm_encoder *encoder;
265 bool dum;
266 int type;
267
268 NV_DEBUG(dev, "\n");
269
270 switch (entry->type) {
271 case OUTPUT_TMDS:
272 NV_INFO(dev, "Detected a TMDS output\n");
273 type = DRM_MODE_ENCODER_TMDS;
274 break;
275 case OUTPUT_LVDS:
 276 NV_INFO(dev, "Detected an LVDS output\n");
277 type = DRM_MODE_ENCODER_LVDS;
278
279 if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
280 NV_ERROR(dev, "Failed parsing LVDS table\n");
281 return -EINVAL;
282 }
283 break;
284 case OUTPUT_DP:
285 NV_INFO(dev, "Detected a DP output\n");
286 type = DRM_MODE_ENCODER_TMDS;
287 break;
288 default:
289 return -EINVAL;
290 }
291
292 nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
293 if (!nv_encoder)
294 return -ENOMEM;
295 encoder = to_drm_encoder(nv_encoder);
296
297 nv_encoder->dcb = entry;
298 nv_encoder->or = ffs(entry->or) - 1;
299
300 nv_encoder->disconnect = nv50_sor_disconnect;
301
302 drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
303 drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
304
305 encoder->possible_crtcs = entry->heads;
306 encoder->possible_clones = 0;
307
308 return 0;
309}
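/* Aside (illustration, not part of the committed code): entry->or is a
 * bitmask of usable output resources, so ffs(entry->or) - 1 selects the
 * lowest set bit as the SOR index; e.g. entry->or == 0x4 yields
 * nv_encoder->or == 2.
 */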
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
new file mode 100644
index 000000000000..5998c35237b0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -0,0 +1,535 @@
1/* $XConsortium: nvreg.h /main/2 1996/10/28 05:13:41 kaleb $ */
2/*
3 * Copyright 1996-1997 David J. McKay
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * DAVID J. MCKAY BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
19 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
20 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24/* $XFree86: xc/programs/Xserver/hw/xfree86/drivers/nv/nvreg.h,v 1.6 2002/01/25 21:56:06 tsi Exp $ */
25
26#ifndef __NVREG_H_
27#define __NVREG_H_
28
29#define NV_PMC_OFFSET 0x00000000
30#define NV_PMC_SIZE 0x00001000
31
32#define NV_PBUS_OFFSET 0x00001000
33#define NV_PBUS_SIZE 0x00001000
34
35#define NV_PFIFO_OFFSET 0x00002000
36#define NV_PFIFO_SIZE 0x00002000
37
38#define NV_HDIAG_OFFSET 0x00005000
39#define NV_HDIAG_SIZE 0x00001000
40
41#define NV_PRAM_OFFSET 0x00006000
42#define NV_PRAM_SIZE 0x00001000
43
44#define NV_PVIDEO_OFFSET 0x00008000
45#define NV_PVIDEO_SIZE 0x00001000
46
47#define NV_PTIMER_OFFSET 0x00009000
48#define NV_PTIMER_SIZE 0x00001000
49
50#define NV_PPM_OFFSET 0x0000A000
51#define NV_PPM_SIZE 0x00001000
52
53#define NV_PTV_OFFSET 0x0000D000
54#define NV_PTV_SIZE 0x00001000
55
56#define NV_PRMVGA_OFFSET 0x000A0000
57#define NV_PRMVGA_SIZE 0x00020000
58
59#define NV_PRMVIO0_OFFSET 0x000C0000
60#define NV_PRMVIO_SIZE 0x00002000
61#define NV_PRMVIO1_OFFSET 0x000C2000
62
63#define NV_PFB_OFFSET 0x00100000
64#define NV_PFB_SIZE 0x00001000
65
66#define NV_PEXTDEV_OFFSET 0x00101000
67#define NV_PEXTDEV_SIZE 0x00001000
68
69#define NV_PME_OFFSET 0x00200000
70#define NV_PME_SIZE 0x00001000
71
72#define NV_PROM_OFFSET 0x00300000
73#define NV_PROM_SIZE 0x00010000
74
75#define NV_PGRAPH_OFFSET 0x00400000
76#define NV_PGRAPH_SIZE 0x00010000
77
78#define NV_PCRTC0_OFFSET 0x00600000
79#define NV_PCRTC0_SIZE 0x00002000 /* empirical */
80
81#define NV_PRMCIO0_OFFSET 0x00601000
82#define NV_PRMCIO_SIZE 0x00002000
83#define NV_PRMCIO1_OFFSET 0x00603000
84
85#define NV50_DISPLAY_OFFSET 0x00610000
86#define NV50_DISPLAY_SIZE 0x0000FFFF
87
88#define NV_PRAMDAC0_OFFSET 0x00680000
89#define NV_PRAMDAC0_SIZE 0x00002000
90
91#define NV_PRMDIO0_OFFSET 0x00681000
92#define NV_PRMDIO_SIZE 0x00002000
93#define NV_PRMDIO1_OFFSET 0x00683000
94
95#define NV_PRAMIN_OFFSET 0x00700000
96#define NV_PRAMIN_SIZE 0x00100000
97
98#define NV_FIFO_OFFSET 0x00800000
99#define NV_FIFO_SIZE 0x00800000
100
101#define NV_PMC_BOOT_0 0x00000000
102#define NV_PMC_ENABLE 0x00000200
103
104#define NV_VIO_VSE2 0x000003c3
105#define NV_VIO_SRX 0x000003c4
106
107#define NV_CIO_CRX__COLOR 0x000003d4
108#define NV_CIO_CR__COLOR 0x000003d5
109
110#define NV_PBUS_DEBUG_1 0x00001084
111#define NV_PBUS_DEBUG_4 0x00001098
112#define NV_PBUS_DEBUG_DUALHEAD_CTL 0x000010f0
113#define NV_PBUS_POWERCTRL_1 0x00001584
114#define NV_PBUS_POWERCTRL_2 0x00001588
115#define NV_PBUS_POWERCTRL_4 0x00001590
116#define NV_PBUS_PCI_NV_19 0x0000184C
117#define NV_PBUS_PCI_NV_20 0x00001850
118# define NV_PBUS_PCI_NV_20_ROM_SHADOW_DISABLED (0 << 0)
119# define NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED (1 << 0)
120
121#define NV_PFIFO_RAMHT 0x00002210
122
123#define NV_PTV_TV_INDEX 0x0000d220
124#define NV_PTV_TV_DATA 0x0000d224
125#define NV_PTV_HFILTER 0x0000d310
126#define NV_PTV_HFILTER2 0x0000d390
127#define NV_PTV_VFILTER 0x0000d510
128
129#define NV_PRMVIO_MISC__WRITE 0x000c03c2
130#define NV_PRMVIO_SRX 0x000c03c4
131#define NV_PRMVIO_SR 0x000c03c5
132# define NV_VIO_SR_RESET_INDEX 0x00
133# define NV_VIO_SR_CLOCK_INDEX 0x01
134# define NV_VIO_SR_PLANE_MASK_INDEX 0x02
135# define NV_VIO_SR_CHAR_MAP_INDEX 0x03
136# define NV_VIO_SR_MEM_MODE_INDEX 0x04
137#define NV_PRMVIO_MISC__READ 0x000c03cc
138#define NV_PRMVIO_GRX 0x000c03ce
139#define NV_PRMVIO_GX 0x000c03cf
140# define NV_VIO_GX_SR_INDEX 0x00
141# define NV_VIO_GX_SREN_INDEX 0x01
142# define NV_VIO_GX_CCOMP_INDEX 0x02
143# define NV_VIO_GX_ROP_INDEX 0x03
144# define NV_VIO_GX_READ_MAP_INDEX 0x04
145# define NV_VIO_GX_MODE_INDEX 0x05
146# define NV_VIO_GX_MISC_INDEX 0x06
147# define NV_VIO_GX_DONT_CARE_INDEX 0x07
148# define NV_VIO_GX_BIT_MASK_INDEX 0x08
149
150#define NV_PFB_BOOT_0 0x00100000
151#define NV_PFB_CFG0 0x00100200
152#define NV_PFB_CFG1 0x00100204
153#define NV_PFB_CSTATUS 0x0010020C
154#define NV_PFB_REFCTRL 0x00100210
155# define NV_PFB_REFCTRL_VALID_1 (1 << 31)
156#define NV_PFB_PAD 0x0010021C
157# define NV_PFB_PAD_CKE_NORMAL (1 << 0)
158#define NV_PFB_TILE_NV10 0x00100240
159#define NV_PFB_TILE_SIZE_NV10 0x00100244
160#define NV_PFB_REF 0x001002D0
161# define NV_PFB_REF_CMD_REFRESH (1 << 0)
162#define NV_PFB_PRE 0x001002D4
163# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0)
164#define NV_PFB_CLOSE_PAGE2 0x0010033C
165#define NV_PFB_TILE_NV40 0x00100600
166#define NV_PFB_TILE_SIZE_NV40 0x00100604
167
168#define NV_PEXTDEV_BOOT_0 0x00101000
169# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
170#define NV_PEXTDEV_BOOT_3 0x0010100c
171
172#define NV_PCRTC_INTR_0 0x00600100
173# define NV_PCRTC_INTR_0_VBLANK (1 << 0)
174#define NV_PCRTC_INTR_EN_0 0x00600140
175#define NV_PCRTC_START 0x00600800
176#define NV_PCRTC_CONFIG 0x00600804
177# define NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA (1 << 0)
178# define NV_PCRTC_CONFIG_START_ADDRESS_HSYNC (2 << 0)
179#define NV_PCRTC_CURSOR_CONFIG 0x00600810
180# define NV_PCRTC_CURSOR_CONFIG_ENABLE_ENABLE (1 << 0)
181# define NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE (1 << 4)
182# define NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM (1 << 8)
183# define NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32 (1 << 12)
184# define NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 (1 << 16)
185# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_32 (2 << 24)
186# define NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 (4 << 24)
187# define NV_PCRTC_CURSOR_CONFIG_CUR_BLEND_ALPHA (1 << 28)
188
189/* note: PCRTC_GPIO is not available on nv10, and in fact aliases 0x600810 */
190#define NV_PCRTC_GPIO 0x00600818
191#define NV_PCRTC_GPIO_EXT 0x0060081c
192#define NV_PCRTC_830 0x00600830
193#define NV_PCRTC_834 0x00600834
194#define NV_PCRTC_850 0x00600850
195#define NV_PCRTC_ENGINE_CTRL 0x00600860
196# define NV_CRTC_FSEL_I2C (1 << 4)
197# define NV_CRTC_FSEL_OVERLAY (1 << 12)
198
199#define NV_PRMCIO_ARX 0x006013c0
200#define NV_PRMCIO_AR__WRITE 0x006013c0
201#define NV_PRMCIO_AR__READ 0x006013c1
202# define NV_CIO_AR_MODE_INDEX 0x10
203# define NV_CIO_AR_OSCAN_INDEX 0x11
204# define NV_CIO_AR_PLANE_INDEX 0x12
205# define NV_CIO_AR_HPP_INDEX 0x13
206# define NV_CIO_AR_CSEL_INDEX 0x14
207#define NV_PRMCIO_INP0 0x006013c2
208#define NV_PRMCIO_CRX__COLOR 0x006013d4
209#define NV_PRMCIO_CR__COLOR 0x006013d5
210 /* Standard VGA CRTC registers */
211# define NV_CIO_CR_HDT_INDEX 0x00 /* horizontal display total */
212# define NV_CIO_CR_HDE_INDEX 0x01 /* horizontal display end */
213# define NV_CIO_CR_HBS_INDEX 0x02 /* horizontal blanking start */
214# define NV_CIO_CR_HBE_INDEX 0x03 /* horizontal blanking end */
215# define NV_CIO_CR_HBE_4_0 4:0
216# define NV_CIO_CR_HRS_INDEX 0x04 /* horizontal retrace start */
217# define NV_CIO_CR_HRE_INDEX 0x05 /* horizontal retrace end */
218# define NV_CIO_CR_HRE_4_0 4:0
219# define NV_CIO_CR_HRE_HBE_5 7:7
220# define NV_CIO_CR_VDT_INDEX 0x06 /* vertical display total */
221# define NV_CIO_CR_OVL_INDEX 0x07 /* overflow bits */
222# define NV_CIO_CR_OVL_VDT_8 0:0
223# define NV_CIO_CR_OVL_VDE_8 1:1
224# define NV_CIO_CR_OVL_VRS_8 2:2
225# define NV_CIO_CR_OVL_VBS_8 3:3
226# define NV_CIO_CR_OVL_VDT_9 5:5
227# define NV_CIO_CR_OVL_VDE_9 6:6
228# define NV_CIO_CR_OVL_VRS_9 7:7
229# define NV_CIO_CR_RSAL_INDEX 0x08 /* normally "preset row scan" */
230# define NV_CIO_CR_CELL_HT_INDEX 0x09 /* cell height?! normally "max scan line" */
231# define NV_CIO_CR_CELL_HT_VBS_9 5:5
232# define NV_CIO_CR_CELL_HT_SCANDBL 7:7
233# define NV_CIO_CR_CURS_ST_INDEX 0x0a /* cursor start */
234# define NV_CIO_CR_CURS_END_INDEX 0x0b /* cursor end */
235# define NV_CIO_CR_SA_HI_INDEX 0x0c /* screen start address high */
236# define NV_CIO_CR_SA_LO_INDEX 0x0d /* screen start address low */
237# define NV_CIO_CR_TCOFF_HI_INDEX 0x0e /* cursor offset high */
238# define NV_CIO_CR_TCOFF_LO_INDEX 0x0f /* cursor offset low */
239# define NV_CIO_CR_VRS_INDEX 0x10 /* vertical retrace start */
240# define NV_CIO_CR_VRE_INDEX 0x11 /* vertical retrace end */
241# define NV_CIO_CR_VRE_3_0 3:0
242# define NV_CIO_CR_VDE_INDEX 0x12 /* vertical display end */
243# define NV_CIO_CR_OFFSET_INDEX 0x13 /* sets screen pitch */
244# define NV_CIO_CR_ULINE_INDEX 0x14 /* underline location */
245# define NV_CIO_CR_VBS_INDEX 0x15 /* vertical blank start */
246# define NV_CIO_CR_VBE_INDEX 0x16 /* vertical blank end */
247# define NV_CIO_CR_MODE_INDEX 0x17 /* crtc mode control */
248# define NV_CIO_CR_LCOMP_INDEX 0x18 /* line compare */
249 /* Extended VGA CRTC registers */
250# define NV_CIO_CRE_RPC0_INDEX 0x19 /* repaint control 0 */
251# define NV_CIO_CRE_RPC0_OFFSET_10_8 7:5
252# define NV_CIO_CRE_RPC1_INDEX 0x1a /* repaint control 1 */
253# define NV_CIO_CRE_RPC1_LARGE 2:2
254# define NV_CIO_CRE_FF_INDEX 0x1b /* fifo control */
255# define NV_CIO_CRE_ENH_INDEX 0x1c /* enhanced? */
256# define NV_CIO_SR_LOCK_INDEX 0x1f /* crtc lock */
257# define NV_CIO_SR_UNLOCK_RW_VALUE 0x57
258# define NV_CIO_SR_LOCK_VALUE 0x99
259# define NV_CIO_CRE_FFLWM__INDEX 0x20 /* fifo low water mark */
260# define NV_CIO_CRE_21 0x21 /* vga shadow crtc lock */
261# define NV_CIO_CRE_LSR_INDEX 0x25 /* ? */
262# define NV_CIO_CRE_LSR_VDT_10 0:0
263# define NV_CIO_CRE_LSR_VDE_10 1:1
264# define NV_CIO_CRE_LSR_VRS_10 2:2
265# define NV_CIO_CRE_LSR_VBS_10 3:3
266# define NV_CIO_CRE_LSR_HBE_6 4:4
267# define NV_CIO_CR_ARX_INDEX 0x26 /* attribute index -- ro copy of 0x60.3c0 */
268# define NV_CIO_CRE_CHIP_ID_INDEX 0x27 /* chip revision */
269# define NV_CIO_CRE_PIXEL_INDEX 0x28
270# define NV_CIO_CRE_PIXEL_FORMAT 1:0
271# define NV_CIO_CRE_HEB__INDEX 0x2d /* horizontal extra bits? */
272# define NV_CIO_CRE_HEB_HDT_8 0:0
273# define NV_CIO_CRE_HEB_HDE_8 1:1
274# define NV_CIO_CRE_HEB_HBS_8 2:2
275# define NV_CIO_CRE_HEB_HRS_8 3:3
276# define NV_CIO_CRE_HEB_ILC_8 4:4
277# define NV_CIO_CRE_2E 0x2e /* some scratch or dummy reg to force writes to sink in */
278# define NV_CIO_CRE_HCUR_ADDR2_INDEX 0x2f /* cursor */
279# define NV_CIO_CRE_HCUR_ADDR0_INDEX 0x30 /* pixmap */
280# define NV_CIO_CRE_HCUR_ADDR0_ADR 6:0
281# define NV_CIO_CRE_HCUR_ASI 7:7
282# define NV_CIO_CRE_HCUR_ADDR1_INDEX 0x31 /* address */
283# define NV_CIO_CRE_HCUR_ADDR1_ENABLE 0:0
284# define NV_CIO_CRE_HCUR_ADDR1_CUR_DBL 1:1
285# define NV_CIO_CRE_HCUR_ADDR1_ADR 7:2
286# define NV_CIO_CRE_LCD__INDEX 0x33
287# define NV_CIO_CRE_LCD_LCD_SELECT 0:0
288# define NV_CIO_CRE_DDC0_STATUS__INDEX 0x36
289# define NV_CIO_CRE_DDC0_WR__INDEX 0x37
290# define NV_CIO_CRE_ILACE__INDEX 0x39 /* interlace */
291# define NV_CIO_CRE_SCRATCH3__INDEX 0x3b
292# define NV_CIO_CRE_SCRATCH4__INDEX 0x3c
293# define NV_CIO_CRE_DDC_STATUS__INDEX 0x3e
294# define NV_CIO_CRE_DDC_WR__INDEX 0x3f
295# define NV_CIO_CRE_EBR_INDEX 0x41 /* extra bits ? (vertical) */
296# define NV_CIO_CRE_EBR_VDT_11 0:0
297# define NV_CIO_CRE_EBR_VDE_11 2:2
298# define NV_CIO_CRE_EBR_VRS_11 4:4
299# define NV_CIO_CRE_EBR_VBS_11 6:6
300# define NV_CIO_CRE_43 0x43
301# define NV_CIO_CRE_44 0x44 /* head control */
302# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
303# define NV_CIO_CRE_RCR 0x46
304# define NV_CIO_CRE_RCR_ENDIAN_BIG 7:7
305# define NV_CIO_CRE_47 0x47 /* extended fifo lwm, used on nv30+ */
306# define NV_CIO_CRE_49 0x49
307# define NV_CIO_CRE_4B 0x4b /* given patterns in 0x[2-3][a-c] regs, probably scratch 6 */
308# define NV_CIO_CRE_TVOUT_LATENCY 0x52
309# define NV_CIO_CRE_53 0x53 /* `fp_htiming' according to Haiku */
310# define NV_CIO_CRE_54 0x54 /* `fp_vtiming' according to Haiku */
311# define NV_CIO_CRE_57 0x57 /* index reg for cr58 */
312# define NV_CIO_CRE_58 0x58 /* data reg for cr57 */
313# define NV_CIO_CRE_59 0x59 /* related to on/off-chip-ness of digital outputs */
314# define NV_CIO_CRE_5B 0x5B /* newer colour saturation reg */
315# define NV_CIO_CRE_85 0x85
316# define NV_CIO_CRE_86 0x86
317#define NV_PRMCIO_INP0__COLOR 0x006013da
318
319#define NV_PRAMDAC_CU_START_POS 0x00680300
320# define NV_PRAMDAC_CU_START_POS_X 15:0
321# define NV_PRAMDAC_CU_START_POS_Y 31:16
322#define NV_RAMDAC_NV10_CURSYNC 0x00680404
323
324#define NV_PRAMDAC_NVPLL_COEFF 0x00680500
325#define NV_PRAMDAC_MPLL_COEFF 0x00680504
326#define NV_PRAMDAC_VPLL_COEFF 0x00680508
327# define NV30_RAMDAC_ENABLE_VCO2 (8 << 4)
328
329#define NV_PRAMDAC_PLL_COEFF_SELECT 0x0068050c
330# define NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE (4 << 0)
331# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL (1 << 8)
332# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_VPLL (2 << 8)
333# define NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL (4 << 8)
334# define NV_PRAMDAC_PLL_COEFF_SELECT_PLL_SOURCE_VPLL2 (8 << 8)
335# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 (1 << 16)
336# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1 (2 << 16)
337# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK2 (4 << 16)
338# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK2 (8 << 16)
339# define NV_PRAMDAC_PLL_COEFF_SELECT_TV_CLK_SOURCE_VIP (1 << 20)
340# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK_RATIO_DB2 (1 << 28)
341# define NV_PRAMDAC_PLL_COEFF_SELECT_VCLK2_RATIO_DB2 (2 << 28)
342
343#define NV_PRAMDAC_PLL_SETUP_CONTROL 0x00680510
344#define NV_RAMDAC_VPLL2 0x00680520
345#define NV_PRAMDAC_SEL_CLK 0x00680524
346#define NV_RAMDAC_DITHER_NV11 0x00680528
347#define NV_PRAMDAC_DACCLK 0x0068052c
348# define NV_PRAMDAC_DACCLK_SEL_DACCLK (1 << 0)
349
350#define NV_RAMDAC_NVPLL_B 0x00680570
351#define NV_RAMDAC_MPLL_B 0x00680574
352#define NV_RAMDAC_VPLL_B 0x00680578
353#define NV_RAMDAC_VPLL2_B 0x0068057c
354# define NV31_RAMDAC_ENABLE_VCO2 (8 << 28)
355#define NV_PRAMDAC_580 0x00680580
356# define NV_RAMDAC_580_VPLL1_ACTIVE (1 << 8)
357# define NV_RAMDAC_580_VPLL2_ACTIVE (1 << 28)
358
359#define NV_PRAMDAC_GENERAL_CONTROL 0x00680600
360# define NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON (3 << 4)
361# define NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL (1 << 8)
362# define NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL (1 << 12)
363# define NV_PRAMDAC_GENERAL_CONTROL_TERMINATION_75OHM (2 << 16)
364# define NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS (1 << 20)
365# define NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG (2 << 28)
366#define NV_PRAMDAC_TEST_CONTROL 0x00680608
367# define NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED (1 << 12)
368# define NV_PRAMDAC_TEST_CONTROL_PWRDWN_DAC_OFF (1 << 16)
369# define NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI (1 << 28)
370#define NV_PRAMDAC_TESTPOINT_DATA 0x00680610
371# define NV_PRAMDAC_TESTPOINT_DATA_NOTBLANK (8 << 28)
372#define NV_PRAMDAC_630 0x00680630
373#define NV_PRAMDAC_634 0x00680634
374
375#define NV_PRAMDAC_TV_SETUP 0x00680700
376#define NV_PRAMDAC_TV_VTOTAL 0x00680720
377#define NV_PRAMDAC_TV_VSKEW 0x00680724
378#define NV_PRAMDAC_TV_VSYNC_DELAY 0x00680728
379#define NV_PRAMDAC_TV_HTOTAL 0x0068072c
380#define NV_PRAMDAC_TV_HSKEW 0x00680730
381#define NV_PRAMDAC_TV_HSYNC_DELAY 0x00680734
382#define NV_PRAMDAC_TV_HSYNC_DELAY2 0x00680738
383
384#define NV_PRAMDAC_TV_SETUP 0x00680700
385
386#define NV_PRAMDAC_FP_VDISPLAY_END 0x00680800
387#define NV_PRAMDAC_FP_VTOTAL 0x00680804
388#define NV_PRAMDAC_FP_VCRTC 0x00680808
389#define NV_PRAMDAC_FP_VSYNC_START 0x0068080c
390#define NV_PRAMDAC_FP_VSYNC_END 0x00680810
391#define NV_PRAMDAC_FP_VVALID_START 0x00680814
392#define NV_PRAMDAC_FP_VVALID_END 0x00680818
393#define NV_PRAMDAC_FP_HDISPLAY_END 0x00680820
394#define NV_PRAMDAC_FP_HTOTAL 0x00680824
395#define NV_PRAMDAC_FP_HCRTC 0x00680828
396#define NV_PRAMDAC_FP_HSYNC_START 0x0068082c
397#define NV_PRAMDAC_FP_HSYNC_END 0x00680830
398#define NV_PRAMDAC_FP_HVALID_START 0x00680834
399#define NV_PRAMDAC_FP_HVALID_END 0x00680838
400
401#define NV_RAMDAC_FP_DITHER 0x0068083c
402#define NV_PRAMDAC_FP_TG_CONTROL 0x00680848
403# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS (1 << 0)
404# define NV_PRAMDAC_FP_TG_CONTROL_VSYNC_DISABLE (2 << 0)
405# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS (1 << 4)
406# define NV_PRAMDAC_FP_TG_CONTROL_HSYNC_DISABLE (2 << 4)
407# define NV_PRAMDAC_FP_TG_CONTROL_MODE_SCALE (0 << 8)
408# define NV_PRAMDAC_FP_TG_CONTROL_MODE_CENTER (1 << 8)
409# define NV_PRAMDAC_FP_TG_CONTROL_MODE_NATIVE (2 << 8)
410# define NV_PRAMDAC_FP_TG_CONTROL_READ_PROG (1 << 20)
411# define NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 (1 << 24)
412# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS (1 << 28)
413# define NV_PRAMDAC_FP_TG_CONTROL_DISPEN_DISABLE (2 << 28)
414#define NV_PRAMDAC_FP_MARGIN_COLOR 0x0068084c
415#define NV_PRAMDAC_850 0x00680850
416#define NV_PRAMDAC_85C 0x0068085c
417#define NV_PRAMDAC_FP_DEBUG_0 0x00680880
418# define NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE (1 << 0)
419# define NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE (1 << 4)
420/* This doesn't seem to be essential for tmds, but still often set */
421# define NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED (8 << 4)
422# define NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR (1 << 8)
423# define NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR (1 << 12)
424# define NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND (1 << 20)
425# define NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND (1 << 24)
426# define NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK (1 << 28)
427#define NV_PRAMDAC_FP_DEBUG_1 0x00680884
428# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_VALUE 11:0
429# define NV_PRAMDAC_FP_DEBUG_1_XSCALE_TESTMODE_ENABLE (1 << 12)
430# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_VALUE 27:16
431# define NV_PRAMDAC_FP_DEBUG_1_YSCALE_TESTMODE_ENABLE (1 << 28)
432#define NV_PRAMDAC_FP_DEBUG_2 0x00680888
433#define NV_PRAMDAC_FP_DEBUG_3 0x0068088C
434
435/* see NV_PRAMDAC_INDIR_TMDS in rules.xml */
436#define NV_PRAMDAC_FP_TMDS_CONTROL 0x006808b0
437# define NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE (1 << 16)
438#define NV_PRAMDAC_FP_TMDS_DATA 0x006808b4
439
440#define NV_PRAMDAC_8C0 0x006808c0
441
442/* Some kind of switch */
443#define NV_PRAMDAC_900 0x00680900
444#define NV_PRAMDAC_A20 0x00680A20
445#define NV_PRAMDAC_A24 0x00680A24
446#define NV_PRAMDAC_A34 0x00680A34
447
448#define NV_PRAMDAC_CTV 0x00680c00
449
450/* names fabricated from NV_USER_DAC info */
451#define NV_PRMDIO_PIXEL_MASK 0x006813c6
452# define NV_PRMDIO_PIXEL_MASK_MASK 0xff
453#define NV_PRMDIO_READ_MODE_ADDRESS 0x006813c7
454#define NV_PRMDIO_WRITE_MODE_ADDRESS 0x006813c8
455#define NV_PRMDIO_PALETTE_DATA 0x006813c9
456
457#define NV_PGRAPH_DEBUG_0 0x00400080
458#define NV_PGRAPH_DEBUG_1 0x00400084
459#define NV_PGRAPH_DEBUG_2_NV04 0x00400088
460#define NV_PGRAPH_DEBUG_2 0x00400620
461#define NV_PGRAPH_DEBUG_3 0x0040008c
462#define NV_PGRAPH_DEBUG_4 0x00400090
463#define NV_PGRAPH_INTR 0x00400100
464#define NV_PGRAPH_INTR_EN 0x00400140
465#define NV_PGRAPH_CTX_CONTROL 0x00400144
466#define NV_PGRAPH_CTX_CONTROL_NV04 0x00400170
467#define NV_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
468#define NV_PGRAPH_ABS_UCLIP_YMIN 0x00400540
469#define NV_PGRAPH_ABS_UCLIP_XMAX 0x00400544
470#define NV_PGRAPH_ABS_UCLIP_YMAX 0x00400548
471#define NV_PGRAPH_BETA_AND 0x00400608
472#define NV_PGRAPH_LIMIT_VIOL_PIX 0x00400610
473#define NV_PGRAPH_BOFFSET0 0x00400640
474#define NV_PGRAPH_BOFFSET1 0x00400644
475#define NV_PGRAPH_BOFFSET2 0x00400648
476#define NV_PGRAPH_BLIMIT0 0x00400684
477#define NV_PGRAPH_BLIMIT1 0x00400688
478#define NV_PGRAPH_BLIMIT2 0x0040068c
479#define NV_PGRAPH_STATUS 0x00400700
480#define NV_PGRAPH_SURFACE 0x00400710
481#define NV_PGRAPH_STATE 0x00400714
482#define NV_PGRAPH_FIFO 0x00400720
483#define NV_PGRAPH_PATTERN_SHAPE 0x00400810
484#define NV_PGRAPH_TILE 0x00400b00
485
486#define NV_PVIDEO_INTR_EN 0x00008140
487#define NV_PVIDEO_BUFFER 0x00008700
488#define NV_PVIDEO_STOP 0x00008704
489#define NV_PVIDEO_UVPLANE_BASE(buff) (0x00008800+(buff)*4)
490#define NV_PVIDEO_UVPLANE_LIMIT(buff) (0x00008808+(buff)*4)
491#define NV_PVIDEO_UVPLANE_OFFSET_BUFF(buff) (0x00008820+(buff)*4)
492#define NV_PVIDEO_BASE(buff) (0x00008900+(buff)*4)
493#define NV_PVIDEO_LIMIT(buff) (0x00008908+(buff)*4)
494#define NV_PVIDEO_LUMINANCE(buff) (0x00008910+(buff)*4)
495#define NV_PVIDEO_CHROMINANCE(buff) (0x00008918+(buff)*4)
496#define NV_PVIDEO_OFFSET_BUFF(buff) (0x00008920+(buff)*4)
497#define NV_PVIDEO_SIZE_IN(buff) (0x00008928+(buff)*4)
498#define NV_PVIDEO_POINT_IN(buff) (0x00008930+(buff)*4)
499#define NV_PVIDEO_DS_DX(buff) (0x00008938+(buff)*4)
500#define NV_PVIDEO_DT_DY(buff) (0x00008940+(buff)*4)
501#define NV_PVIDEO_POINT_OUT(buff) (0x00008948+(buff)*4)
502#define NV_PVIDEO_SIZE_OUT(buff) (0x00008950+(buff)*4)
503#define NV_PVIDEO_FORMAT(buff) (0x00008958+(buff)*4)
504# define NV_PVIDEO_FORMAT_PLANAR (1 << 0)
505# define NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8 (1 << 16)
506# define NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY (1 << 20)
507# define NV_PVIDEO_FORMAT_MATRIX_ITURBT709 (1 << 24)
508#define NV_PVIDEO_COLOR_KEY 0x00008B00
509
510/* NV04 overlay defines from VIDIX & Haiku */
511#define NV_PVIDEO_INTR_EN_0 0x00680140
512#define NV_PVIDEO_STEP_SIZE 0x00680200
513#define NV_PVIDEO_CONTROL_Y 0x00680204
514#define NV_PVIDEO_CONTROL_X 0x00680208
515#define NV_PVIDEO_BUFF0_START_ADDRESS 0x0068020c
516#define NV_PVIDEO_BUFF0_PITCH_LENGTH 0x00680214
517#define NV_PVIDEO_BUFF0_OFFSET 0x0068021c
518#define NV_PVIDEO_BUFF1_START_ADDRESS 0x00680210
519#define NV_PVIDEO_BUFF1_PITCH_LENGTH 0x00680218
520#define NV_PVIDEO_BUFF1_OFFSET 0x00680220
521#define NV_PVIDEO_OE_STATE 0x00680224
522#define NV_PVIDEO_SU_STATE 0x00680228
523#define NV_PVIDEO_RM_STATE 0x0068022c
524#define NV_PVIDEO_WINDOW_START 0x00680230
525#define NV_PVIDEO_WINDOW_SIZE 0x00680234
526#define NV_PVIDEO_FIFO_THRES_SIZE 0x00680238
527#define NV_PVIDEO_FIFO_BURST_LENGTH 0x0068023c
528#define NV_PVIDEO_KEY 0x00680240
529#define NV_PVIDEO_OVERLAY 0x00680244
530#define NV_PVIDEO_RED_CSC_OFFSET 0x00680280
531#define NV_PVIDEO_GREEN_CSC_OFFSET 0x00680284
532#define NV_PVIDEO_BLUE_CSC_OFFSET 0x00680288
533#define NV_PVIDEO_CSC_ADJUST 0x0068028c
534
535#endif
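/* Aside (illustration, not part of the committed code): the bare
 * "high:low" tokens in the CRTC/RAMDAC defines above (e.g.
 * NV_CIO_CR_HBE_4_0 4:0) are bitfield specifiers meant to be expanded
 * inside a conditional expression -- the classic X.org nv-driver trick
 * where "1 ? 7:5" evaluates to 7 and "0 ? 7:5" to 5, yielding the field's
 * high and low bit.  A sketch of the kind of helper that consumes them;
 * the macro names here are illustrative, the driver's real ones live in
 * its hw headers:
 */
#if 0
#define BF_HIGH(field)	(1 ? field)
#define BF_LOW(field)	(0 ? field)
#define BF_MASK(field) \
	((0xffffffffU >> (31 - (BF_HIGH(field) - BF_LOW(field)))) << BF_LOW(field))

/* BF_MASK(NV_CIO_CRE_RPC0_OFFSET_10_8) == BF_MASK(7:5) == 0xe0 */
#endif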
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index b5713eedd6e1..feb52eee4314 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o radeon_pm.o
+	r600_blit_kms.o radeon_pm.o atombios_dp.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d67c42555ab9..6578d19dff93 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -263,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
263 case ATOM_ARG_FB: 263 case ATOM_ARG_FB:
264 idx = U8(*ptr); 264 idx = U8(*ptr);
265 (*ptr)++; 265 (*ptr)++;
266 val = gctx->scratch[((gctx->fb_base + idx) / 4)];
266 if (print) 267 if (print)
267 DEBUG("FB[0x%02X]", idx); 268 DEBUG("FB[0x%02X]", idx);
268 printk(KERN_INFO "FB access is not implemented.\n"); 269 break;
269 return 0;
270 case ATOM_ARG_IMM: 270 case ATOM_ARG_IMM:
271 switch (align) { 271 switch (align) {
272 case ATOM_SRC_DWORD: 272 case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
488 case ATOM_ARG_FB: 488 case ATOM_ARG_FB:
489 idx = U8(*ptr); 489 idx = U8(*ptr);
490 (*ptr)++; 490 (*ptr)++;
491 gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
491 DEBUG("FB[0x%02X]", idx); 492 DEBUG("FB[0x%02X]", idx);
492 printk(KERN_INFO "FB access is not implemented.\n"); 493 break;
493 return;
494 case ATOM_ARG_PLL: 494 case ATOM_ARG_PLL:
495 idx = U8(*ptr); 495 idx = U8(*ptr);
496 (*ptr)++; 496 (*ptr)++;
@@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
1214 *crev = CU8(idx + 3); 1214 *crev = CU8(idx + 3);
1215 return; 1215 return;
1216} 1216}
1217
1218int atom_allocate_fb_scratch(struct atom_context *ctx)
1219{
1220 int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
1221 uint16_t data_offset;
1222 int usage_bytes;
1223 struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
1224
1225 atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
1226
1227 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1228
1229 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1230 firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
1231 firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1232
1233 usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1234 if (usage_bytes == 0)
1235 usage_bytes = 20 * 1024;
1236 /* allocate some scratch memory */
1237 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
1238 if (!ctx->scratch)
1239 return -ENOMEM;
1240 return 0;
1241}
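
A likely call site (a sketch of the usual radeon init flow; the warning text is illustrative) allocates the scratch right after the ATOM context is created, since the AUX-channel path added later in this patch depends on ctx->scratch being non-NULL:

	if (atom_allocate_fb_scratch(rdev->mode_info.atom_context))
		dev_warn(rdev->dev, "no ATOM scratch buffer, DP AUX will fail\n");
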
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index e6eb38f2bcae..6671848e5ea1 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -132,6 +132,7 @@ struct atom_context {
132 uint8_t shift; 132 uint8_t shift;
133 int cs_equal, cs_above; 133 int cs_equal, cs_above;
134 int io_mode; 134 int io_mode;
135 uint32_t *scratch;
135}; 136};
136 137
137extern int atom_debug; 138extern int atom_debug;
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *);
142void atom_destroy(struct atom_context *); 143void atom_destroy(struct atom_context *);
143void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start); 144void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
144void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev); 145void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
146int atom_allocate_fb_scratch(struct atom_context *ctx);
145#include "atom-types.h" 147#include "atom-types.h"
146#include "atombios.h" 148#include "atombios.h"
147#include "ObjectID.h" 149#include "ObjectID.h"
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 6643afc36cea..5f48515c77a7 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
2680typedef struct _ATOM_HPD_INT_RECORD { 2680typedef struct _ATOM_HPD_INT_RECORD {
2681 ATOM_COMMON_RECORD_HEADER sheader; 2681 ATOM_COMMON_RECORD_HEADER sheader;
2682 UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */ 2682 UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
2683 UCHAR ucPluggged_PinState; 2683 UCHAR ucPlugged_PinState;
2684} ATOM_HPD_INT_RECORD; 2684} ATOM_HPD_INT_RECORD;
2685 2685
2686typedef struct _ATOM_OUTPUT_PROTECTION_RECORD { 2686typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index c15287a590ff..260fcf59f00c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
241{ 241{
242 struct drm_device *dev = crtc->dev; 242 struct drm_device *dev = crtc->dev;
243 struct radeon_device *rdev = dev->dev_private; 243 struct radeon_device *rdev = dev->dev_private;
244 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
244 245
245 switch (mode) { 246 switch (mode) {
246 case DRM_MODE_DPMS_ON: 247 case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
248 if (ASIC_IS_DCE3(rdev)) 249 if (ASIC_IS_DCE3(rdev))
249 atombios_enable_crtc_memreq(crtc, 1); 250 atombios_enable_crtc_memreq(crtc, 1);
250 atombios_blank_crtc(crtc, 0); 251 atombios_blank_crtc(crtc, 0);
252 drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
253 radeon_crtc_load_lut(crtc);
251 break; 254 break;
252 case DRM_MODE_DPMS_STANDBY: 255 case DRM_MODE_DPMS_STANDBY:
253 case DRM_MODE_DPMS_SUSPEND: 256 case DRM_MODE_DPMS_SUSPEND:
254 case DRM_MODE_DPMS_OFF: 257 case DRM_MODE_DPMS_OFF:
258 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
255 atombios_blank_crtc(crtc, 1); 259 atombios_blank_crtc(crtc, 1);
256 if (ASIC_IS_DCE3(rdev)) 260 if (ASIC_IS_DCE3(rdev))
257 atombios_enable_crtc_memreq(crtc, 0); 261 atombios_enable_crtc_memreq(crtc, 0);
258 atombios_enable_crtc(crtc, 0); 262 atombios_enable_crtc(crtc, 0);
259 break; 263 break;
260 } 264 }
261
262 if (mode != DRM_MODE_DPMS_OFF) {
263 radeon_crtc_load_lut(crtc);
264 }
265} 265}
266 266
267static void 267static void
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
457 if (encoder->encoder_type != 457 if (encoder->encoder_type !=
458 DRM_MODE_ENCODER_DAC) 458 DRM_MODE_ENCODER_DAC)
459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 459 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
460 if (!ASIC_IS_AVIVO(rdev) 460 if (encoder->encoder_type ==
461 && (encoder->encoder_type == 461 DRM_MODE_ENCODER_LVDS)
462 DRM_MODE_ENCODER_LVDS))
463 pll_flags |= RADEON_PLL_USE_REF_DIV; 462 pll_flags |= RADEON_PLL_USE_REF_DIV;
464 } 463 }
465 radeon_encoder = to_radeon_encoder(encoder); 464 radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
500 else 499 else
501 pll = &rdev->clock.p2pll; 500 pll = &rdev->clock.p2pll;
502 501
503 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 502 if (ASIC_IS_AVIVO(rdev)) {
504 &ref_div, &post_div, pll_flags); 503 if (radeon_new_pll)
504 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
505 &fb_div, &frac_fb_div,
506 &ref_div, &post_div, pll_flags);
507 else
508 radeon_compute_pll(pll, adjusted_clock, &pll_clock,
509 &fb_div, &frac_fb_div,
510 &ref_div, &post_div, pll_flags);
511 } else
512 radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
513 &ref_div, &post_div, pll_flags);
505 514
506 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock); 515 index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
507 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, 516 atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
574 struct radeon_device *rdev = dev->dev_private; 583 struct radeon_device *rdev = dev->dev_private;
575 struct radeon_framebuffer *radeon_fb; 584 struct radeon_framebuffer *radeon_fb;
576 struct drm_gem_object *obj; 585 struct drm_gem_object *obj;
577 struct drm_radeon_gem_object *obj_priv; 586 struct radeon_bo *rbo;
578 uint64_t fb_location; 587 uint64_t fb_location;
579 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 588 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
589 int r;
580 590
581 if (!crtc->fb) 591 /* no fb bound */
582 return -EINVAL; 592 if (!crtc->fb) {
593 DRM_DEBUG("No FB bound\n");
594 return 0;
595 }
583 596
584 radeon_fb = to_radeon_framebuffer(crtc->fb); 597 radeon_fb = to_radeon_framebuffer(crtc->fb);
585 598
599 /* Pin framebuffer & get tiling information */
586 obj = radeon_fb->obj; 600 obj = radeon_fb->obj;
587 obj_priv = obj->driver_private; 601 rbo = obj->driver_private;
588 602 r = radeon_bo_reserve(rbo, false);
589 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) { 603 if (unlikely(r != 0))
604 return r;
605 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
606 if (unlikely(r != 0)) {
607 radeon_bo_unreserve(rbo);
590 return -EINVAL; 608 return -EINVAL;
591 } 609 }
610 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
611 radeon_bo_unreserve(rbo);
592 612
593 switch (crtc->fb->bits_per_pixel) { 613 switch (crtc->fb->bits_per_pixel) {
594 case 8: 614 case 8:
@@ -618,8 +638,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
618 return -EINVAL; 638 return -EINVAL;
619 } 639 }
620 640
621 radeon_object_get_tiling_flags(obj->driver_private,
622 &tiling_flags, NULL);
623 if (tiling_flags & RADEON_TILING_MACRO) 641 if (tiling_flags & RADEON_TILING_MACRO)
624 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; 642 fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
625 643
@@ -674,7 +692,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
674 692
675 if (old_fb && old_fb != crtc->fb) { 693 if (old_fb && old_fb != crtc->fb) {
676 radeon_fb = to_radeon_framebuffer(old_fb); 694 radeon_fb = to_radeon_framebuffer(old_fb);
677 radeon_gem_object_unpin(radeon_fb->obj); 695 rbo = radeon_fb->obj->driver_private;
696 r = radeon_bo_reserve(rbo, false);
697 if (unlikely(r != 0))
698 return r;
699 radeon_bo_unpin(rbo);
700 radeon_bo_unreserve(rbo);
678 } 701 }
679 702
680 /* Bytes per pixel may have changed */ 703 /* Bytes per pixel may have changed */
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
new file mode 100644
index 000000000000..0d63c4436e7c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -0,0 +1,790 @@
1/*
2 * Copyright 2007-8 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors: Dave Airlie
24 * Alex Deucher
25 */
26#include "drmP.h"
27#include "radeon_drm.h"
28#include "radeon.h"
29
30#include "atom.h"
31#include "atom-bits.h"
32#include "drm_dp_helper.h"
33
34/* move these to drm_dp_helper.c/h */
35#define DP_LINK_CONFIGURATION_SIZE 9
36#define DP_LINK_STATUS_SIZE 6
37#define DP_DPCD_SIZE 8
38
39static const char *voltage_names[] = {
40 "0.4V", "0.6V", "0.8V", "1.2V"
41};
42static const char *pre_emph_names[] = {
43 "0dB", "3.5dB", "6dB", "9.5dB"
44};
45
46static const int dp_clocks[] = {
47 54000, /* 1 lane, 1.62 Gbps */
48 90000, /* 1 lane, 2.70 Gbps */
49 108000, /* 2 lane, 1.62 Gbps */
50 180000, /* 2 lane, 2.70 Gbps */
51 216000, /* 4 lane, 1.62 Gbps */
52 360000, /* 4 lane, 2.70 Gbps */
53};
54
55static const int num_dp_clocks = ARRAY_SIZE(dp_clocks);
56
57/* common helper functions */
58static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
59{
60 int i;
61 u8 max_link_bw;
62 u8 max_lane_count;
63
64 if (!dpcd)
65 return 0;
66
67 max_link_bw = dpcd[DP_MAX_LINK_RATE];
68 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
69
70 switch (max_link_bw) {
71 case DP_LINK_BW_1_62:
72 default:
73 for (i = 0; i < num_dp_clocks; i++) {
74 if (i % 2)
75 continue;
76 switch (max_lane_count) {
77 case 1:
78 if (i > 1)
79 return 0;
80 break;
81 case 2:
82 if (i > 3)
83 return 0;
84 break;
85 case 4:
86 default:
87 break;
88 }
89 if (dp_clocks[i] > mode_clock) {
90 if (i < 2)
91 return 1;
92 else if (i < 4)
93 return 2;
94 else
95 return 4;
96 }
97 }
98 break;
99 case DP_LINK_BW_2_7:
100 for (i = 0; i < num_dp_clocks; i++) {
101 switch (max_lane_count) {
102 case 1:
103 if (i > 1)
104 return 0;
105 break;
106 case 2:
107 if (i > 3)
108 return 0;
109 break;
110 case 4:
111 default:
112 break;
113 }
114 if (dp_clocks[i] > mode_clock) {
115 if (i < 2)
116 return 1;
117 else if (i < 4)
118 return 2;
119 else
120 return 4;
121 }
122 }
123 break;
124 }
125
126 return 0;
127}
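
Worked example (reader's illustration, not part of the patch): for a 148500 kHz mode clock against a sink reporting DP_LINK_BW_2_7 and 4 lanes, the first dp_clocks entry above the mode clock is 180000 at index 3 ("2 lane, 2.70 Gbps"), so the function returns 2 lanes; the companion helper below walks the same table and, because index 3 is odd, returns a 270000 (2.7 Gbps) link clock.
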
128
129static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
130{
131 int i;
132 u8 max_link_bw;
133 u8 max_lane_count;
134
135 if (!dpcd)
136 return 0;
137
138 max_link_bw = dpcd[DP_MAX_LINK_RATE];
139 max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
140
141 switch (max_link_bw) {
142 case DP_LINK_BW_1_62:
143 default:
144 for (i = 0; i < num_dp_clocks; i++) {
145 if (i % 2)
146 continue;
147 switch (max_lane_count) {
148 case 1:
149 if (i > 1)
150 return 0;
151 break;
152 case 2:
153 if (i > 3)
154 return 0;
155 break;
156 case 4:
157 default:
158 break;
159 }
160 if (dp_clocks[i] > mode_clock)
161 return 162000;
162 }
163 break;
164 case DP_LINK_BW_2_7:
165 for (i = 0; i < num_dp_clocks; i++) {
166 switch (max_lane_count) {
167 case 1:
168 if (i > 1)
169 return 0;
170 break;
171 case 2:
172 if (i > 3)
173 return 0;
174 break;
175 case 4:
176 default:
177 break;
178 }
179 if (dp_clocks[i] > mode_clock)
180 return (i % 2) ? 270000 : 162000;
181 }
182 }
183
184 return 0;
185}
186
187int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
188{
189 int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
190 int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
191
192 if ((lanes == 0) || (bw == 0))
193 return MODE_CLOCK_HIGH;
194
195 return MODE_OK;
196}
197
198static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
199{
200 return link_status[r - DP_LANE0_1_STATUS];
201}
202
203static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
204 int lane)
205{
206 int i = DP_LANE0_1_STATUS + (lane >> 1);
207 int s = (lane & 1) * 4;
208 u8 l = dp_link_status(link_status, i);
209 return (l >> s) & 0xf;
210}
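
Each DPCD status byte packs two lanes at four bits per lane, so lane 3 lives in the high nibble of DP_LANE2_3_STATUS. A quick illustration with an assumed status value:

	/* link_status[1] == 0x77: (0x77 >> 4) & 0xf == 0x7 ==
	 * DP_LANE_CR_DONE | DP_LANE_CHANNEL_EQ_DONE | DP_LANE_SYMBOL_LOCKED */
	u8 s = dp_get_lane_status(link_status, 3);
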
211
212static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
213 int lane_count)
214{
215 int lane;
216 u8 lane_status;
217
218 for (lane = 0; lane < lane_count; lane++) {
219 lane_status = dp_get_lane_status(link_status, lane);
220 if ((lane_status & DP_LANE_CR_DONE) == 0)
221 return false;
222 }
223 return true;
224}
225
226static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
227 int lane_count)
228{
229 u8 lane_align;
230 u8 lane_status;
231 int lane;
232
233 lane_align = dp_link_status(link_status,
234 DP_LANE_ALIGN_STATUS_UPDATED);
235 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
236 return false;
237 for (lane = 0; lane < lane_count; lane++) {
238 lane_status = dp_get_lane_status(link_status, lane);
239 if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
240 return false;
241 }
242 return true;
243}
244
245static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
246 int lane)
247
248{
249 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
250 int s = ((lane & 1) ?
251 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
252 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
253 u8 l = dp_link_status(link_status, i);
254
255 return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
256}
257
258static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
259 int lane)
260{
261 int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
262 int s = ((lane & 1) ?
263 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
264 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
265 u8 l = dp_link_status(link_status, i);
266
267 return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
268}
269
270/* XXX fix me -- chip specific */
271#define DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_1200
272static u8 dp_pre_emphasis_max(u8 voltage_swing)
273{
274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
275 case DP_TRAIN_VOLTAGE_SWING_400:
276 return DP_TRAIN_PRE_EMPHASIS_6;
277 case DP_TRAIN_VOLTAGE_SWING_600:
278 return DP_TRAIN_PRE_EMPHASIS_6;
279 case DP_TRAIN_VOLTAGE_SWING_800:
280 return DP_TRAIN_PRE_EMPHASIS_3_5;
281 case DP_TRAIN_VOLTAGE_SWING_1200:
282 default:
283 return DP_TRAIN_PRE_EMPHASIS_0;
284 }
285}
286
287static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
288 int lane_count,
289 u8 train_set[4])
290{
291 u8 v = 0;
292 u8 p = 0;
293 int lane;
294
295 for (lane = 0; lane < lane_count; lane++) {
296 u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
297 u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
298
299 DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
300 lane,
301 voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
302 pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
303
304 if (this_v > v)
305 v = this_v;
306 if (this_p > p)
307 p = this_p;
308 }
309
310 if (v >= DP_VOLTAGE_MAX)
311 v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
312
313 if (p >= dp_pre_emphasis_max(v))
314 p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
315
316 DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
317 voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
318 pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
319
320 for (lane = 0; lane < 4; lane++)
321 train_set[lane] = v | p;
322}
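
Worked example: if the sink asks lane 0 for 0.6V/3.5dB and lane 1 for 0.8V/0dB, the loop keeps the per-parameter maxima (v = 0.8V, p = 3.5dB); since dp_pre_emphasis_max(0.8V) is 3.5dB, p is also flagged with DP_TRAIN_MAX_PRE_EMPHASIS_REACHED before the pair is copied to all four train_set entries.
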
323
324
325/* radeon aux chan functions */
326bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
327 int num_bytes, u8 *read_byte,
328 u8 read_buf_len, u8 delay)
329{
330 struct drm_device *dev = chan->dev;
331 struct radeon_device *rdev = dev->dev_private;
332 PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
333 int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
334 unsigned char *base;
335
336 memset(&args, 0, sizeof(args));
337
338 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
339
340 memcpy(base, req_bytes, num_bytes);
341
342 args.lpAuxRequest = 0;
343 args.lpDataOut = 16;
344 args.ucDataOutLen = 0;
345 args.ucChannelID = chan->rec.i2c_id;
346 args.ucDelay = delay / 10;
347
348 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
349
350 if (args.ucReplyStatus) {
351 DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
352 req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
353 chan->rec.i2c_id, args.ucReplyStatus);
354 return false;
355 }
356
357 if (args.ucDataOutLen && read_byte && read_buf_len) {
358 if (read_buf_len < args.ucDataOutLen) {
359 DRM_ERROR("Buffer to small for return answer %d %d\n",
360 read_buf_len, args.ucDataOutLen);
361 return false;
362 }
363 {
364 int len = min(read_buf_len, args.ucDataOutLen);
365 memcpy(read_byte, base + 16, len);
366 }
367 }
368 return true;
369}
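
The transaction above stages the raw AUX request at scratch offset 0 and has the ATOM table deposit any reply at offset 16 (args.lpDataOut = 16). A hedged sketch of a one-byte native DPCD read built directly on it, mirroring what radeon_dp_aux_native_read below constructs (chan is assumed to be the connector's dp_i2c_bus):

	u8 req[4], rev;

	req[0] = DP_DPCD_REV;			/* address, low byte */
	req[1] = DP_DPCD_REV >> 8;		/* address, high byte */
	req[2] = AUX_NATIVE_READ << 4;		/* command nibble */
	req[3] = (4 << 4) | (1 - 1);		/* msg size 4, want 1 byte */
	if (radeon_process_aux_ch(chan, req, 4, &rev, 1, 0))
		DRM_DEBUG("DPCD rev %02x\n", rev);
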
370
371bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
372 uint8_t send_bytes, uint8_t *send)
373{
374 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
375 u8 msg[20];
376 u8 msg_len, dp_msg_len;
377 bool ret;
378
379 dp_msg_len = 4;
380 msg[0] = address;
381 msg[1] = address >> 8;
382 msg[2] = AUX_NATIVE_WRITE << 4;
383 dp_msg_len += send_bytes;
384 msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
385
386 if (send_bytes > 16)
387 return false;
388
389 memcpy(&msg[4], send, send_bytes);
390 msg_len = 4 + send_bytes;
391 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
392 return ret;
393}
394
395bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
396 uint8_t delay, uint8_t expected_bytes,
397 uint8_t *read_p)
398{
399 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
400 u8 msg[20];
401 u8 msg_len, dp_msg_len;
402 bool ret = false;
403 msg_len = 4;
404 dp_msg_len = 4;
405 msg[0] = address;
406 msg[1] = address >> 8;
407 msg[2] = AUX_NATIVE_READ << 4;
408 msg[3] = (dp_msg_len) << 4;
409 msg[3] |= expected_bytes - 1;
410
411 ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
412 return ret;
413}
414
415/* radeon dp functions */
416static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
417 uint8_t ucconfig, uint8_t lane_num)
418{
419 DP_ENCODER_SERVICE_PARAMETERS args;
420 int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
421
422 memset(&args, 0, sizeof(args));
423 args.ucLinkClock = dp_clock / 10;
424 args.ucConfig = ucconfig;
425 args.ucAction = action;
426 args.ucLaneNum = lane_num;
427 args.ucStatus = 0;
428
429 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
430 return args.ucStatus;
431}
432
433u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
434{
435 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
436 struct drm_device *dev = radeon_connector->base.dev;
437 struct radeon_device *rdev = dev->dev_private;
438
439 return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
440 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
441}
442
443bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
444{
445 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
446 u8 msg[25];
447 int ret;
448
449 ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
450 if (ret) {
451 memcpy(dig_connector->dpcd, msg, 8);
452 {
453 int i;
454 DRM_DEBUG("DPCD: ");
455 for (i = 0; i < 8; i++)
456 DRM_DEBUG("%02x ", msg[i]);
457 DRM_DEBUG("\n");
458 }
459 return true;
460 }
461 dig_connector->dpcd[0] = 0;
462 return false;
463}
464
465void radeon_dp_set_link_config(struct drm_connector *connector,
466 struct drm_display_mode *mode)
467{
468 struct radeon_connector *radeon_connector;
469 struct radeon_connector_atom_dig *dig_connector;
470
471 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
472 return;
473
474 radeon_connector = to_radeon_connector(connector);
475 if (!radeon_connector->con_priv)
476 return;
477 dig_connector = radeon_connector->con_priv;
478
479 dig_connector->dp_clock =
480 dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
481 dig_connector->dp_lane_count =
482 dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
483}
484
485int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
486 struct drm_display_mode *mode)
487{
488 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
489
490 return dp_mode_valid(dig_connector->dpcd, mode->clock);
491}
492
493static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
494 u8 link_status[DP_LINK_STATUS_SIZE])
495{
496 int ret;
497 ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
498 DP_LINK_STATUS_SIZE, link_status);
499 if (!ret) {
500 DRM_ERROR("displayport link status failed\n");
501 return false;
502 }
503
504 DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
505 link_status[0], link_status[1], link_status[2],
506 link_status[3], link_status[4], link_status[5]);
507 return true;
508}
509
510bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
511{
512 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
513 u8 link_status[DP_LINK_STATUS_SIZE];
514
515 if (!atom_dp_get_link_status(radeon_connector, link_status))
516 return false;
517 if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
518 return false;
519 return true;
520}
521
522static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
523{
524 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
525
526 if (dig_connector->dpcd[0] >= 0x11) {
527 radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
528 &power_state);
529 }
530}
531
532static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
533{
534 radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
535 &downspread);
536}
537
538static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
539 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
540{
541 radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
542 link_configuration);
543}
544
545static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
546 struct drm_encoder *encoder,
547 u8 train_set[4])
548{
549 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
550 int i;
551
552 for (i = 0; i < dig_connector->dp_lane_count; i++)
553 atombios_dig_transmitter_setup(encoder,
554 ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
555 i, train_set[i]);
556
557 radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
558 dig_connector->dp_lane_count, train_set);
559}
560
561static void dp_set_training(struct radeon_connector *radeon_connector,
562 u8 training)
563{
564 radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
565 1, &training);
566}
567
568void dp_link_train(struct drm_encoder *encoder,
569 struct drm_connector *connector)
570{
571 struct drm_device *dev = encoder->dev;
572 struct radeon_device *rdev = dev->dev_private;
573 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
574 struct radeon_encoder_atom_dig *dig;
575 struct radeon_connector *radeon_connector;
576 struct radeon_connector_atom_dig *dig_connector;
577 int enc_id = 0;
578 bool clock_recovery, channel_eq;
579 u8 link_status[DP_LINK_STATUS_SIZE];
580 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
581 u8 tries, voltage;
582 u8 train_set[4];
583 int i;
584
585 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
586 return;
587
588 if (!radeon_encoder->enc_priv)
589 return;
590 dig = radeon_encoder->enc_priv;
591
592 radeon_connector = to_radeon_connector(connector);
593 if (!radeon_connector->con_priv)
594 return;
595 dig_connector = radeon_connector->con_priv;
596
597 if (ASIC_IS_DCE32(rdev)) {
598 if (dig->dig_block)
599 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
600 else
601 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
602 if (dig_connector->linkb)
603 enc_id |= ATOM_DP_CONFIG_LINK_B;
604 else
605 enc_id |= ATOM_DP_CONFIG_LINK_A;
606 } else {
607 if (dig_connector->linkb)
608 enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
609 else
610 enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
611 }
612
613 memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
614 if (dig_connector->dp_clock == 270000)
615 link_configuration[0] = DP_LINK_BW_2_7;
616 else
617 link_configuration[0] = DP_LINK_BW_1_62;
618 link_configuration[1] = dig_connector->dp_lane_count;
619 if (dig_connector->dpcd[0] >= 0x11)
620 link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
621
622 /* power up the sink */
623 dp_set_power(radeon_connector, DP_SET_POWER_D0);
624 /* disable the training pattern on the sink */
625 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
626 /* set link bw and lanes on the sink */
627 dp_set_link_bw_lanes(radeon_connector, link_configuration);
628 /* disable downspread on the sink */
629 dp_set_downspread(radeon_connector, 0);
630 /* start training on the source */
631 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
632 dig_connector->dp_clock, enc_id, 0);
633 /* set training pattern 1 on the source */
634 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
635 dig_connector->dp_clock, enc_id, 0);
636
637 /* set initial vs/emph */
638 memset(train_set, 0, 4);
639 udelay(400);
640 /* set training pattern 1 on the sink */
641 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
642
643 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
644
645 /* clock recovery loop */
646 clock_recovery = false;
647 tries = 0;
648 voltage = 0xff;
649 for (;;) {
650 udelay(100);
651 if (!atom_dp_get_link_status(radeon_connector, link_status))
652 break;
653
654 if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
655 clock_recovery = true;
656 break;
657 }
658
659 for (i = 0; i < dig_connector->dp_lane_count; i++) {
660 if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
661 break;
662 }
663 if (i == dig_connector->dp_lane_count) {
664 DRM_ERROR("clock recovery reached max voltage\n");
665 break;
666 }
667
668 if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
669 ++tries;
670 if (tries == 5) {
671 DRM_ERROR("clock recovery tried 5 times\n");
672 break;
673 }
674 } else
675 tries = 0;
676
677 voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
678
679 /* Compute new train_set as requested by sink */
680 dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
681 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
682 }
683 if (!clock_recovery)
684 DRM_ERROR("clock recovery failed\n");
685 else
686 DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
687 train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
688 (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
689 DP_TRAIN_PRE_EMPHASIS_SHIFT);
690
691
692 /* set training pattern 2 on the sink */
693 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
694 /* set training pattern 2 on the source */
695 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
696 dig_connector->dp_clock, enc_id, 1);
697
698 /* channel equalization loop */
699 tries = 0;
700 channel_eq = false;
701 for (;;) {
702 udelay(400);
703 if (!atom_dp_get_link_status(radeon_connector, link_status))
704 break;
705
706 if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
707 channel_eq = true;
708 break;
709 }
710
711 /* Try 5 times */
712 if (tries > 5) {
713 DRM_ERROR("channel eq failed: 5 tries\n");
714 break;
715 }
716
717 /* Compute new train_set as requested by sink */
718 dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
719 dp_update_dpvs_emph(radeon_connector, encoder, train_set);
720
721 tries++;
722 }
723
724 if (!channel_eq)
725 DRM_ERROR("channel eq failed\n");
726 else
727 DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
728 train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
729 (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
730 >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
731
732 /* disable the training pattern on the sink */
733 dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
734
735 radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
736 dig_connector->dp_clock, enc_id, 0);
737}
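
The sequence above follows DP 1.1 full link training: power the sink to D0, program link rate and lane count, run clock recovery under TRAINING_PATTERN_1 (re-reading the sink's requested swing/pre-emphasis each pass, giving up after five passes at an unchanged voltage or once every lane reports max swing), switch to TRAINING_PATTERN_2 for channel equalization with its own bounded retry loop, then clear the training pattern and tell the encoder training is complete.
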
738
739int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
740 uint8_t write_byte, uint8_t *read_byte)
741{
742 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
743 struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
744 int ret = 0;
745 uint16_t address = algo_data->address;
746 uint8_t msg[5];
747 uint8_t reply[2];
748 int msg_len, dp_msg_len;
749 int reply_bytes;
750
751 /* Set up the command byte */
752 if (mode & MODE_I2C_READ)
753 msg[2] = AUX_I2C_READ << 4;
754 else
755 msg[2] = AUX_I2C_WRITE << 4;
756
757 if (!(mode & MODE_I2C_STOP))
758 msg[2] |= AUX_I2C_MOT << 4;
759
760 msg[0] = address;
761 msg[1] = address >> 8;
762
763 reply_bytes = 1;
764
765 msg_len = 4;
766 dp_msg_len = 3;
767 switch (mode) {
768 case MODE_I2C_WRITE:
769 msg[4] = write_byte;
770 msg_len++;
771 dp_msg_len += 2;
772 break;
773 case MODE_I2C_READ:
774 dp_msg_len += 1;
775 break;
776 default:
777 break;
778 }
779
780 msg[3] = (dp_msg_len) << 4;
781 ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
782
783 if (ret) {
784 if (read_byte)
785 *read_byte = reply[0];
786 return reply_bytes;
787 }
788 return -EREMOTEIO;
789}
790
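
radeon_dp_i2c_aux_ch is shaped to slot into the shared drm_dp_helper i2c-over-AUX algorithm; a sketch of the wiring (field names follow the i2c_algo_dp_aux_data of this era's drm_dp_helper.h, stated here as an assumption):

	struct i2c_algo_dp_aux_data algo = {
		.running = false,
		.address = 0x50,		/* DDC EDID EEPROM */
		.aux_ch  = radeon_dp_i2c_aux_ch,
	};

	adapter->algo_data = &algo;	/* then register, e.g. via i2c_dp_aux_add_bus() */
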
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c9e93eabcf16..824cc6480a06 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 65 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
66 */ 66 */
67 67
68/* hpd for digital panel detect/disconnect */
69bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
70{
71 bool connected = false;
72
73 switch (hpd) {
74 case RADEON_HPD_1:
75 if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
76 connected = true;
77 break;
78 case RADEON_HPD_2:
79 if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
80 connected = true;
81 break;
82 default:
83 break;
84 }
85 return connected;
86}
87
88void r100_hpd_set_polarity(struct radeon_device *rdev,
89 enum radeon_hpd_id hpd)
90{
91 u32 tmp;
92 bool connected = r100_hpd_sense(rdev, hpd);
93
94 switch (hpd) {
95 case RADEON_HPD_1:
96 tmp = RREG32(RADEON_FP_GEN_CNTL);
97 if (connected)
98 tmp &= ~RADEON_FP_DETECT_INT_POL;
99 else
100 tmp |= RADEON_FP_DETECT_INT_POL;
101 WREG32(RADEON_FP_GEN_CNTL, tmp);
102 break;
103 case RADEON_HPD_2:
104 tmp = RREG32(RADEON_FP2_GEN_CNTL);
105 if (connected)
106 tmp &= ~RADEON_FP2_DETECT_INT_POL;
107 else
108 tmp |= RADEON_FP2_DETECT_INT_POL;
109 WREG32(RADEON_FP2_GEN_CNTL, tmp);
110 break;
111 default:
112 break;
113 }
114}
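
The polarity write above arms the detect interrupt for the next transition: a pin that currently senses a panel gets its interrupt polarity cleared so the disconnect edge fires, and an empty pin the opposite; the implied expectation is that the hotplug path re-runs r100_hpd_set_polarity after servicing each event.
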
115
116void r100_hpd_init(struct radeon_device *rdev)
117{
118 struct drm_device *dev = rdev->ddev;
119 struct drm_connector *connector;
120
121 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
122 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
123 switch (radeon_connector->hpd.hpd) {
124 case RADEON_HPD_1:
125 rdev->irq.hpd[0] = true;
126 break;
127 case RADEON_HPD_2:
128 rdev->irq.hpd[1] = true;
129 break;
130 default:
131 break;
132 }
133 }
134 r100_irq_set(rdev);
135}
136
137void r100_hpd_fini(struct radeon_device *rdev)
138{
139 struct drm_device *dev = rdev->ddev;
140 struct drm_connector *connector;
141
142 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
143 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
144 switch (radeon_connector->hpd.hpd) {
145 case RADEON_HPD_1:
146 rdev->irq.hpd[0] = false;
147 break;
148 case RADEON_HPD_2:
149 rdev->irq.hpd[1] = false;
150 break;
151 default:
152 break;
153 }
154 }
155}
156
68/* 157/*
69 * PCI GART 158 * PCI GART
70 */ 159 */
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
94 return radeon_gart_table_ram_alloc(rdev); 183 return radeon_gart_table_ram_alloc(rdev);
95} 184}
96 185
186/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
187void r100_enable_bm(struct radeon_device *rdev)
188{
189 uint32_t tmp;
190 /* Enable bus mastering */
191 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
192 WREG32(RADEON_BUS_CNTL, tmp);
193}
194
97int r100_pci_gart_enable(struct radeon_device *rdev) 195int r100_pci_gart_enable(struct radeon_device *rdev)
98{ 196{
99 uint32_t tmp; 197 uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
105 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location); 203 WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
106 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; 204 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
107 WREG32(RADEON_AIC_HI_ADDR, tmp); 205 WREG32(RADEON_AIC_HI_ADDR, tmp);
108 /* Enable bus mastering */
109 tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
110 WREG32(RADEON_BUS_CNTL, tmp);
111 /* set PCI GART page-table base address */ 206 /* set PCI GART page-table base address */
112 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr); 207 WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
113 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN; 208 tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
157 if (rdev->irq.crtc_vblank_int[1]) { 252 if (rdev->irq.crtc_vblank_int[1]) {
158 tmp |= RADEON_CRTC2_VBLANK_MASK; 253 tmp |= RADEON_CRTC2_VBLANK_MASK;
159 } 254 }
255 if (rdev->irq.hpd[0]) {
256 tmp |= RADEON_FP_DETECT_MASK;
257 }
258 if (rdev->irq.hpd[1]) {
259 tmp |= RADEON_FP2_DETECT_MASK;
260 }
160 WREG32(RADEON_GEN_INT_CNTL, tmp); 261 WREG32(RADEON_GEN_INT_CNTL, tmp);
161 return 0; 262 return 0;
162} 263}
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
175static inline uint32_t r100_irq_ack(struct radeon_device *rdev) 276static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
176{ 277{
177 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); 278 uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
178 uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT | 279 uint32_t irq_mask = RADEON_SW_INT_TEST |
179 RADEON_CRTC2_VBLANK_STAT; 280 RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
281 RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
180 282
181 if (irqs) { 283 if (irqs) {
182 WREG32(RADEON_GEN_INT_STATUS, irqs); 284 WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
187int r100_irq_process(struct radeon_device *rdev) 289int r100_irq_process(struct radeon_device *rdev)
188{ 290{
189 uint32_t status, msi_rearm; 291 uint32_t status, msi_rearm;
292 bool queue_hotplug = false;
190 293
191 status = r100_irq_ack(rdev); 294 status = r100_irq_ack(rdev);
192 if (!status) { 295 if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
207 if (status & RADEON_CRTC2_VBLANK_STAT) { 310 if (status & RADEON_CRTC2_VBLANK_STAT) {
208 drm_handle_vblank(rdev->ddev, 1); 311 drm_handle_vblank(rdev->ddev, 1);
209 } 312 }
313 if (status & RADEON_FP_DETECT_STAT) {
314 queue_hotplug = true;
315 DRM_DEBUG("HPD1\n");
316 }
317 if (status & RADEON_FP2_DETECT_STAT) {
318 queue_hotplug = true;
319 DRM_DEBUG("HPD2\n");
320 }
210 status = r100_irq_ack(rdev); 321 status = r100_irq_ack(rdev);
211 } 322 }
323 if (queue_hotplug)
324 queue_work(rdev->wq, &rdev->hotplug_work);
212 if (rdev->msi_enabled) { 325 if (rdev->msi_enabled) {
213 switch (rdev->family) { 326 switch (rdev->family) {
214 case CHIP_RS400: 327 case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
255 int r; 368 int r;
256 369
257 if (rdev->wb.wb_obj == NULL) { 370 if (rdev->wb.wb_obj == NULL) {
258 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, 371 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
259 true, 372 RADEON_GEM_DOMAIN_GTT,
260 RADEON_GEM_DOMAIN_GTT, 373 &rdev->wb.wb_obj);
261 false, &rdev->wb.wb_obj);
262 if (r) { 374 if (r) {
263 DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); 375 dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
264 return r; 376 return r;
265 } 377 }
266 r = radeon_object_pin(rdev->wb.wb_obj, 378 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
267 RADEON_GEM_DOMAIN_GTT, 379 if (unlikely(r != 0))
268 &rdev->wb.gpu_addr); 380 return r;
381 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
382 &rdev->wb.gpu_addr);
269 if (r) { 383 if (r) {
270 DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); 384 dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
385 radeon_bo_unreserve(rdev->wb.wb_obj);
271 return r; 386 return r;
272 } 387 }
273 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 388 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
389 radeon_bo_unreserve(rdev->wb.wb_obj);
274 if (r) { 390 if (r) {
275 DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); 391 dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
276 return r; 392 return r;
277 } 393 }
278 } 394 }
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
290 406
291void r100_wb_fini(struct radeon_device *rdev) 407void r100_wb_fini(struct radeon_device *rdev)
292{ 408{
409 int r;
410
293 r100_wb_disable(rdev); 411 r100_wb_disable(rdev);
294 if (rdev->wb.wb_obj) { 412 if (rdev->wb.wb_obj) {
295 radeon_object_kunmap(rdev->wb.wb_obj); 413 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
296 radeon_object_unpin(rdev->wb.wb_obj); 414 if (unlikely(r != 0)) {
297 radeon_object_unref(&rdev->wb.wb_obj); 415 dev_err(rdev->dev, "(%d) can't finish WB\n", r);
416 return;
417 }
418 radeon_bo_kunmap(rdev->wb.wb_obj);
419 radeon_bo_unpin(rdev->wb.wb_obj);
420 radeon_bo_unreserve(rdev->wb.wb_obj);
421 radeon_bo_unref(&rdev->wb.wb_obj);
298 rdev->wb.wb = NULL; 422 rdev->wb.wb = NULL;
299 rdev->wb.wb_obj = NULL; 423 rdev->wb.wb_obj = NULL;
300 } 424 }
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1288 1412
1289int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, 1413int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1290 struct radeon_cs_packet *pkt, 1414 struct radeon_cs_packet *pkt,
1291 struct radeon_object *robj) 1415 struct radeon_bo *robj)
1292{ 1416{
1293 unsigned idx; 1417 unsigned idx;
1294 u32 value; 1418 u32 value;
1295 idx = pkt->idx + 1; 1419 idx = pkt->idx + 1;
1296 value = radeon_get_ib_value(p, idx + 2); 1420 value = radeon_get_ib_value(p, idx + 2);
1297 if ((value + 1) > radeon_object_size(robj)) { 1421 if ((value + 1) > radeon_bo_size(robj)) {
1298 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1422 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1299 "(need %u have %lu) !\n", 1423 "(need %u have %lu) !\n",
1300 value + 1, 1424 value + 1,
1301 radeon_object_size(robj)); 1425 radeon_bo_size(robj));
1302 return -EINVAL; 1426 return -EINVAL;
1303 } 1427 }
1304 return 0; 1428 return 0;
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev)
1583 r100_hdp_reset(rdev); 1707 r100_hdp_reset(rdev);
1584} 1708}
1585 1709
1710void r100_hdp_flush(struct radeon_device *rdev)
1711{
1712 u32 tmp;
1713 tmp = RREG32(RADEON_HOST_PATH_CNTL);
1714 tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
1715 WREG32(RADEON_HOST_PATH_CNTL, tmp);
1716}
1717
1586void r100_hdp_reset(struct radeon_device *rdev) 1718void r100_hdp_reset(struct radeon_device *rdev)
1587{ 1719{
1588 uint32_t tmp; 1720 uint32_t tmp;
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
1650 return 0; 1782 return 0;
1651} 1783}
1652 1784
1785void r100_set_common_regs(struct radeon_device *rdev)
1786{
1787 /* set these so they don't interfere with anything */
1788 WREG32(RADEON_OV0_SCALE_CNTL, 0);
1789 WREG32(RADEON_SUBPIC_CNTL, 0);
1790 WREG32(RADEON_VIPH_CONTROL, 0);
1791 WREG32(RADEON_I2C_CNTL_1, 0);
1792 WREG32(RADEON_DVI_I2C_CNTL_1, 0);
1793 WREG32(RADEON_CAP0_TRIG_CNTL, 0);
1794 WREG32(RADEON_CAP1_TRIG_CNTL, 0);
1795}
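
r100_set_common_regs quiets blocks the video BIOS may have left running (overlay scaler, subpicture engine, VIP host port, both I2C controllers and both capture triggers) so they cannot touch memory or raise interrupts once the driver reprograms the chip.
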
1653 1796
1654/* 1797/*
1655 * VRAM info 1798 * VRAM info
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2594 struct r100_cs_track *track, unsigned idx) 2737 struct r100_cs_track *track, unsigned idx)
2595{ 2738{
2596 unsigned face, w, h; 2739 unsigned face, w, h;
2597 struct radeon_object *cube_robj; 2740 struct radeon_bo *cube_robj;
2598 unsigned long size; 2741 unsigned long size;
2599 2742
2600 for (face = 0; face < 5; face++) { 2743 for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2607 2750
2608 size += track->textures[idx].cube_info[face].offset; 2751 size += track->textures[idx].cube_info[face].offset;
2609 2752
2610 if (size > radeon_object_size(cube_robj)) { 2753 if (size > radeon_bo_size(cube_robj)) {
2611 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n", 2754 DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
2612 size, radeon_object_size(cube_robj)); 2755 size, radeon_bo_size(cube_robj));
2613 r100_cs_track_texture_print(&track->textures[idx]); 2756 r100_cs_track_texture_print(&track->textures[idx]);
2614 return -1; 2757 return -1;
2615 } 2758 }
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
2620static int r100_cs_track_texture_check(struct radeon_device *rdev, 2763static int r100_cs_track_texture_check(struct radeon_device *rdev,
2621 struct r100_cs_track *track) 2764 struct r100_cs_track *track)
2622{ 2765{
2623 struct radeon_object *robj; 2766 struct radeon_bo *robj;
2624 unsigned long size; 2767 unsigned long size;
2625 unsigned u, i, w, h; 2768 unsigned u, i, w, h;
2626 int ret; 2769 int ret;
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
2676 "%u\n", track->textures[u].tex_coord_type, u); 2819 "%u\n", track->textures[u].tex_coord_type, u);
2677 return -EINVAL; 2820 return -EINVAL;
2678 } 2821 }
2679 if (size > radeon_object_size(robj)) { 2822 if (size > radeon_bo_size(robj)) {
2680 DRM_ERROR("Texture of unit %u needs %lu bytes but is " 2823 DRM_ERROR("Texture of unit %u needs %lu bytes but is "
2681 "%lu\n", u, size, radeon_object_size(robj)); 2824 "%lu\n", u, size, radeon_bo_size(robj));
2682 r100_cs_track_texture_print(&track->textures[u]); 2825 r100_cs_track_texture_print(&track->textures[u]);
2683 return -EINVAL; 2826 return -EINVAL;
2684 } 2827 }
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2700 } 2843 }
2701 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; 2844 size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
2702 size += track->cb[i].offset; 2845 size += track->cb[i].offset;
2703 if (size > radeon_object_size(track->cb[i].robj)) { 2846 if (size > radeon_bo_size(track->cb[i].robj)) {
2704 DRM_ERROR("[drm] Buffer too small for color buffer %d " 2847 DRM_ERROR("[drm] Buffer too small for color buffer %d "
2705 "(need %lu have %lu) !\n", i, size, 2848 "(need %lu have %lu) !\n", i, size,
2706 radeon_object_size(track->cb[i].robj)); 2849 radeon_bo_size(track->cb[i].robj));
2707 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", 2850 DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
2708 i, track->cb[i].pitch, track->cb[i].cpp, 2851 i, track->cb[i].pitch, track->cb[i].cpp,
2709 track->cb[i].offset, track->maxy); 2852 track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2717 } 2860 }
2718 size = track->zb.pitch * track->zb.cpp * track->maxy; 2861 size = track->zb.pitch * track->zb.cpp * track->maxy;
2719 size += track->zb.offset; 2862 size += track->zb.offset;
2720 if (size > radeon_object_size(track->zb.robj)) { 2863 if (size > radeon_bo_size(track->zb.robj)) {
2721 DRM_ERROR("[drm] Buffer too small for z buffer " 2864 DRM_ERROR("[drm] Buffer too small for z buffer "
2722 "(need %lu have %lu) !\n", size, 2865 "(need %lu have %lu) !\n", size,
2723 radeon_object_size(track->zb.robj)); 2866 radeon_bo_size(track->zb.robj));
2724 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n", 2867 DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
2725 track->zb.pitch, track->zb.cpp, 2868 track->zb.pitch, track->zb.cpp,
2726 track->zb.offset, track->maxy); 2869 track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2738 "bound\n", prim_walk, i); 2881 "bound\n", prim_walk, i);
2739 return -EINVAL; 2882 return -EINVAL;
2740 } 2883 }
2741 if (size > radeon_object_size(track->arrays[i].robj)) { 2884 if (size > radeon_bo_size(track->arrays[i].robj)) {
2742 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " 2885 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2743 "have %lu dwords\n", prim_walk, i, 2886 "need %lu dwords have %lu dwords\n",
2744 size >> 2, 2887 prim_walk, i, size >> 2,
2745 radeon_object_size(track->arrays[i].robj) >> 2); 2888 radeon_bo_size(track->arrays[i].robj)
2889 >> 2);
2746 DRM_ERROR("Max indices %u\n", track->max_indx); 2890 DRM_ERROR("Max indices %u\n", track->max_indx);
2747 return -EINVAL; 2891 return -EINVAL;
2748 } 2892 }
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
2756 "bound\n", prim_walk, i); 2900 "bound\n", prim_walk, i);
2757 return -EINVAL; 2901 return -EINVAL;
2758 } 2902 }
2759 if (size > radeon_object_size(track->arrays[i].robj)) { 2903 if (size > radeon_bo_size(track->arrays[i].robj)) {
2760 DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " 2904 dev_err(rdev->dev, "(PW %u) Vertex array %u "
2761 "have %lu dwords\n", prim_walk, i, size >> 2, 2905 "need %lu dwords have %lu dwords\n",
2762 radeon_object_size(track->arrays[i].robj) >> 2); 2906 prim_walk, i, size >> 2,
2907 radeon_bo_size(track->arrays[i].robj)
2908 >> 2);
2763 return -EINVAL; 2909 return -EINVAL;
2764 } 2910 }
2765 } 2911 }
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev)
3101{ 3247{
3102 int r; 3248 int r;
3103 3249
3250 /* set common regs */
3251 r100_set_common_regs(rdev);
3252 /* program mc */
3104 r100_mc_program(rdev); 3253 r100_mc_program(rdev);
3105 /* Resume clock */ 3254 /* Resume clock */
3106 r100_clock_startup(rdev); 3255 r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev)
3108 r100_gpu_init(rdev); 3257 r100_gpu_init(rdev);
3109 /* Initialize GART (initialize after TTM so we can allocate 3258 /* Initialize GART (initialize after TTM so we can allocate
3110 * memory through TTM but finalize after TTM) */ 3259 * memory through TTM but finalize after TTM) */
3260 r100_enable_bm(rdev);
3111 if (rdev->flags & RADEON_IS_PCI) { 3261 if (rdev->flags & RADEON_IS_PCI) {
3112 r = r100_pci_gart_enable(rdev); 3262 r = r100_pci_gart_enable(rdev);
3113 if (r) 3263 if (r)
3114 return r; 3264 return r;
3115 } 3265 }
3116 /* Enable IRQ */ 3266 /* Enable IRQ */
3117 rdev->irq.sw_int = true;
3118 r100_irq_set(rdev); 3267 r100_irq_set(rdev);
3119 /* 1M ring buffer */ 3268 /* 1M ring buffer */
3120 r = r100_cp_init(rdev, 1024 * 1024); 3269 r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
3150 radeon_combios_asic_init(rdev->ddev); 3299 radeon_combios_asic_init(rdev->ddev);
3151 /* Resume clock after posting */ 3300 /* Resume clock after posting */
3152 r100_clock_startup(rdev); 3301 r100_clock_startup(rdev);
3302 /* Initialize surface registers */
3303 radeon_surface_init(rdev);
3153 return r100_startup(rdev); 3304 return r100_startup(rdev);
3154} 3305}
3155 3306
@@ -3174,7 +3325,7 @@ void r100_fini(struct radeon_device *rdev)
3174 r100_pci_gart_fini(rdev); 3325 r100_pci_gart_fini(rdev);
3175 radeon_irq_kms_fini(rdev); 3326 radeon_irq_kms_fini(rdev);
3176 radeon_fence_driver_fini(rdev); 3327 radeon_fence_driver_fini(rdev);
3177 radeon_object_fini(rdev); 3328 radeon_bo_fini(rdev);
3178 radeon_atombios_fini(rdev); 3329 radeon_atombios_fini(rdev);
3179 kfree(rdev->bios); 3330 kfree(rdev->bios);
3180 rdev->bios = NULL; 3331 rdev->bios = NULL;
@@ -3242,10 +3393,8 @@ int r100_init(struct radeon_device *rdev)
3242 RREG32(R_0007C0_CP_STAT)); 3393 RREG32(R_0007C0_CP_STAT));
3243 } 3394 }
3244 /* check if cards are posted or not */ 3395 /* check if cards are posted or not */
3245 if (!radeon_card_posted(rdev) && rdev->bios) { 3396 if (radeon_boot_test_post_card(rdev) == false)
3246 DRM_INFO("GPU not posted. posting now...\n"); 3397 return -EINVAL;
3247 radeon_combios_asic_init(rdev->ddev);
3248 }
3249 /* Set asic errata */ 3398 /* Set asic errata */
3250 r100_errata(rdev); 3399 r100_errata(rdev);
3251 /* Initialize clocks */ 3400 /* Initialize clocks */
@@ -3264,7 +3413,7 @@ int r100_init(struct radeon_device *rdev)
3264 if (r) 3413 if (r)
3265 return r; 3414 return r;
3266 /* Memory manager */ 3415 /* Memory manager */
3267 r = radeon_object_init(rdev); 3416 r = radeon_bo_init(rdev);
3268 if (r) 3417 if (r)
3269 return r; 3418 return r;
3270 if (rdev->flags & RADEON_IS_PCI) { 3419 if (rdev->flags & RADEON_IS_PCI) {
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 0daf0d76a891..ca50903dd2bb 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -10,26 +10,26 @@
10 * CS functions 10 * CS functions
11 */ 11 */
12struct r100_cs_track_cb { 12struct r100_cs_track_cb {
13 struct radeon_object *robj; 13 struct radeon_bo *robj;
14 unsigned pitch; 14 unsigned pitch;
15 unsigned cpp; 15 unsigned cpp;
16 unsigned offset; 16 unsigned offset;
17}; 17};
18 18
19struct r100_cs_track_array { 19struct r100_cs_track_array {
20 struct radeon_object *robj; 20 struct radeon_bo *robj;
21 unsigned esize; 21 unsigned esize;
22}; 22};
23 23
24struct r100_cs_cube_info { 24struct r100_cs_cube_info {
25 struct radeon_object *robj; 25 struct radeon_bo *robj;
26 unsigned offset; 26 unsigned offset;
27 unsigned width; 27 unsigned width;
28 unsigned height; 28 unsigned height;
29}; 29};
30 30
31struct r100_cs_track_texture { 31struct r100_cs_track_texture {
32 struct radeon_object *robj; 32 struct radeon_bo *robj;
33 struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */ 33 struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
34 unsigned pitch; 34 unsigned pitch;
35 unsigned width; 35 unsigned width;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 2f43ee8e4048..83378c39d0e3 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
137 137
138void rv370_pcie_gart_disable(struct radeon_device *rdev) 138void rv370_pcie_gart_disable(struct radeon_device *rdev)
139{ 139{
140 uint32_t tmp; 140 u32 tmp;
141 int r;
141 142
142 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); 143 tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
143 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; 144 tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
144 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); 145 WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
145 if (rdev->gart.table.vram.robj) { 146 if (rdev->gart.table.vram.robj) {
146 radeon_object_kunmap(rdev->gart.table.vram.robj); 147 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
147 radeon_object_unpin(rdev->gart.table.vram.robj); 148 if (likely(r == 0)) {
149 radeon_bo_kunmap(rdev->gart.table.vram.robj);
150 radeon_bo_unpin(rdev->gart.table.vram.robj);
151 radeon_bo_unreserve(rdev->gart.table.vram.robj);
152 }
148 } 153 }
149} 154}
150 155
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev)
1181{ 1186{
1182 int r; 1187 int r;
1183 1188
1189 /* set common regs */
1190 r100_set_common_regs(rdev);
1191 /* program mc */
1184 r300_mc_program(rdev); 1192 r300_mc_program(rdev);
1185 /* Resume clock */ 1193 /* Resume clock */
1186 r300_clock_startup(rdev); 1194 r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev)
1193 if (r) 1201 if (r)
1194 return r; 1202 return r;
1195 } 1203 }
1204
1205 if (rdev->family == CHIP_R300 ||
1206 rdev->family == CHIP_R350 ||
1207 rdev->family == CHIP_RV350)
1208 r100_enable_bm(rdev);
1209
1196 if (rdev->flags & RADEON_IS_PCI) { 1210 if (rdev->flags & RADEON_IS_PCI) {
1197 r = r100_pci_gart_enable(rdev); 1211 r = r100_pci_gart_enable(rdev);
1198 if (r) 1212 if (r)
1199 return r; 1213 return r;
1200 } 1214 }
1201 /* Enable IRQ */ 1215 /* Enable IRQ */
1202 rdev->irq.sw_int = true;
1203 r100_irq_set(rdev); 1216 r100_irq_set(rdev);
1204 /* 1M ring buffer */ 1217 /* 1M ring buffer */
1205 r = r100_cp_init(rdev, 1024 * 1024); 1218 r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
1237 radeon_combios_asic_init(rdev->ddev); 1250 radeon_combios_asic_init(rdev->ddev);
1238 /* Resume clock after posting */ 1251 /* Resume clock after posting */
1239 r300_clock_startup(rdev); 1252 r300_clock_startup(rdev);
1253 /* Initialize surface registers */
1254 radeon_surface_init(rdev);
1240 return r300_startup(rdev); 1255 return r300_startup(rdev);
1241} 1256}
1242 1257
@@ -1265,7 +1280,7 @@ void r300_fini(struct radeon_device *rdev)
1265 r100_pci_gart_fini(rdev); 1280 r100_pci_gart_fini(rdev);
1266 radeon_irq_kms_fini(rdev); 1281 radeon_irq_kms_fini(rdev);
1267 radeon_fence_driver_fini(rdev); 1282 radeon_fence_driver_fini(rdev);
1268 radeon_object_fini(rdev); 1283 radeon_bo_fini(rdev);
1269 radeon_atombios_fini(rdev); 1284 radeon_atombios_fini(rdev);
1270 kfree(rdev->bios); 1285 kfree(rdev->bios);
1271 rdev->bios = NULL; 1286 rdev->bios = NULL;
@@ -1303,10 +1318,8 @@ int r300_init(struct radeon_device *rdev)
1303 RREG32(R_0007C0_CP_STAT)); 1318 RREG32(R_0007C0_CP_STAT));
1304 } 1319 }
1305 /* check if cards are posted or not */ 1320 /* check if cards are posted or not */
1306 if (!radeon_card_posted(rdev) && rdev->bios) { 1321 if (radeon_boot_test_post_card(rdev) == false)
1307 DRM_INFO("GPU not posted. posting now...\n"); 1322 return -EINVAL;
1308 radeon_combios_asic_init(rdev->ddev);
1309 }
1310 /* Set asic errata */ 1323 /* Set asic errata */
1311 r300_errata(rdev); 1324 r300_errata(rdev);
1312 /* Initialize clocks */ 1325 /* Initialize clocks */
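The posting logic removed in the hunk above moves behind radeon_boot_test_post_card(), which is defined outside this file. A hedged sketch of the semantics its callers in this patch assume (post from BIOS when possible, fail otherwise):

	/* assumed behaviour of radeon_boot_test_post_card(); the real
	 * helper lives elsewhere in this series */
	bool radeon_boot_test_post_card(struct radeon_device *rdev)
	{
		if (radeon_card_posted(rdev))
			return true;
		if (rdev->bios) {
			DRM_INFO("GPU not posted. posting now...\n");
			if (rdev->is_atom_bios)
				atom_asic_init(rdev->mode_info.atom_context);
			else
				radeon_combios_asic_init(rdev->ddev);
			return true;
		}
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}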
@@ -1325,7 +1338,7 @@ int r300_init(struct radeon_device *rdev)
1325 if (r) 1338 if (r)
1326 return r; 1339 return r;
1327 /* Memory manager */ 1340 /* Memory manager */
1328 r = radeon_object_init(rdev); 1341 r = radeon_bo_init(rdev);
1329 if (r) 1342 if (r)
1330 return r; 1343 return r;
1331 if (rdev->flags & RADEON_IS_PCIE) { 1344 if (rdev->flags & RADEON_IS_PCIE) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 1cefdbcc0850..c05a7270cf0c 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
169{ 169{
170 int r; 170 int r;
171 171
172 /* set common regs */
173 r100_set_common_regs(rdev);
174 /* program mc */
172 r300_mc_program(rdev); 175 r300_mc_program(rdev);
173 /* Resume clock */ 176 /* Resume clock */
174 r420_clock_resume(rdev); 177 r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
186 } 189 }
187 r420_pipes_init(rdev); 190 r420_pipes_init(rdev);
188 /* Enable IRQ */ 191 /* Enable IRQ */
189 rdev->irq.sw_int = true;
190 r100_irq_set(rdev); 192 r100_irq_set(rdev);
191 /* 1M ring buffer */ 193 /* 1M ring buffer */
192 r = r100_cp_init(rdev, 1024 * 1024); 194 r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
229 } 231 }
230 /* Resume clock after posting */ 232 /* Resume clock after posting */
231 r420_clock_resume(rdev); 233 r420_clock_resume(rdev);
232 234 /* Initialize surface registers */
235 radeon_surface_init(rdev);
233 return r420_startup(rdev); 236 return r420_startup(rdev);
234} 237}
235 238
@@ -258,7 +261,7 @@ void r420_fini(struct radeon_device *rdev)
258 radeon_agp_fini(rdev); 261 radeon_agp_fini(rdev);
259 radeon_irq_kms_fini(rdev); 262 radeon_irq_kms_fini(rdev);
260 radeon_fence_driver_fini(rdev); 263 radeon_fence_driver_fini(rdev);
261 radeon_object_fini(rdev); 264 radeon_bo_fini(rdev);
262 if (rdev->is_atom_bios) { 265 if (rdev->is_atom_bios) {
263 radeon_atombios_fini(rdev); 266 radeon_atombios_fini(rdev);
264 } else { 267 } else {
@@ -301,14 +304,9 @@ int r420_init(struct radeon_device *rdev)
301 RREG32(R_0007C0_CP_STAT)); 304 RREG32(R_0007C0_CP_STAT));
302 } 305 }
303 /* check if cards are posted or not */ 306 /* check if cards are posted or not */
304 if (!radeon_card_posted(rdev) && rdev->bios) { 307 if (radeon_boot_test_post_card(rdev) == false)
305 DRM_INFO("GPU not posted. posting now...\n"); 308 return -EINVAL;
306 if (rdev->is_atom_bios) { 309
307 atom_asic_init(rdev->mode_info.atom_context);
308 } else {
309 radeon_combios_asic_init(rdev->ddev);
310 }
311 }
312 /* Initialize clocks */ 310 /* Initialize clocks */
313 radeon_get_clock_info(rdev->ddev); 311 radeon_get_clock_info(rdev->ddev);
314 /* Initialize power management */ 312 /* Initialize power management */
@@ -331,10 +329,13 @@ int r420_init(struct radeon_device *rdev)
331 return r; 329 return r;
332 } 330 }
333 /* Memory manager */ 331 /* Memory manager */
334 r = radeon_object_init(rdev); 332 r = radeon_bo_init(rdev);
335 if (r) { 333 if (r) {
336 return r; 334 return r;
337 } 335 }
336 if (rdev->family == CHIP_R420)
337 r100_enable_bm(rdev);
338
338 if (rdev->flags & RADEON_IS_PCIE) { 339 if (rdev->flags & RADEON_IS_PCIE) {
339 r = rv370_pcie_gart_init(rdev); 340 r = rv370_pcie_gart_init(rdev);
340 if (r) 341 if (r)
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 7baa73955563..74ad89bdf2b5 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -716,6 +716,8 @@
716 716
717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988 717#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
718 718
719#define AVIVO_DC_GPIO_HPD_A 0x7e94
720
719#define AVIVO_GPIO_0 0x7e30 721#define AVIVO_GPIO_0 0x7e30
720#define AVIVO_GPIO_1 0x7e40 722#define AVIVO_GPIO_1 0x7e40
721#define AVIVO_GPIO_2 0x7e50 723#define AVIVO_GPIO_2 0x7e50
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index f7435185c0a6..0f3843b6dac7 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
185 return r; 185 return r;
186 } 186 }
187 /* Enable IRQ */ 187 /* Enable IRQ */
188 rdev->irq.sw_int = true;
189 rs600_irq_set(rdev); 188 rs600_irq_set(rdev);
190 /* 1M ring buffer */ 189 /* 1M ring buffer */
191 r = r100_cp_init(rdev, 1024 * 1024); 190 r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
221 atom_asic_init(rdev->mode_info.atom_context); 220 atom_asic_init(rdev->mode_info.atom_context);
222 /* Resume clock after posting */ 221 /* Resume clock after posting */
223 rv515_clock_startup(rdev); 222 rv515_clock_startup(rdev);
223 /* Initialize surface registers */
224 radeon_surface_init(rdev);
224 return r520_startup(rdev); 225 return r520_startup(rdev);
225} 226}
226 227
@@ -254,6 +255,9 @@ int r520_init(struct radeon_device *rdev)
254 RREG32(R_0007C0_CP_STAT)); 255 RREG32(R_0007C0_CP_STAT));
255 } 256 }
256 /* check if cards are posted or not */ 257 /* check if cards are posted or not */
258 if (radeon_boot_test_post_card(rdev) == false)
259 return -EINVAL;
260
257 if (!radeon_card_posted(rdev) && rdev->bios) { 261 if (!radeon_card_posted(rdev) && rdev->bios) {
258 DRM_INFO("GPU not posted. posting now...\n"); 262 DRM_INFO("GPU not posted. posting now...\n");
259 atom_asic_init(rdev->mode_info.atom_context); 263 atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@ int r520_init(struct radeon_device *rdev)
277 if (r) 281 if (r)
278 return r; 282 return r;
279 /* Memory manager */ 283 /* Memory manager */
280 r = radeon_object_init(rdev); 284 r = radeon_bo_init(rdev);
281 if (r) 285 if (r)
282 return r; 286 return r;
283 r = rv370_pcie_gart_init(rdev); 287 r = rv370_pcie_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6740ed24358f..36656bd110bf 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -38,8 +38,10 @@
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
41#define RLC_UCODE_SIZE 768
41#define R700_PFP_UCODE_SIZE 848 42#define R700_PFP_UCODE_SIZE 848
42#define R700_PM4_UCODE_SIZE 1360 43#define R700_PM4_UCODE_SIZE 1360
44#define R700_RLC_UCODE_SIZE 1024
43 45
44/* Firmware Names */ 46/* Firmware Names */
45MODULE_FIRMWARE("radeon/R600_pfp.bin"); 47MODULE_FIRMWARE("radeon/R600_pfp.bin");
@@ -62,6 +64,8 @@ MODULE_FIRMWARE("radeon/RV730_pfp.bin");
62MODULE_FIRMWARE("radeon/RV730_me.bin"); 64MODULE_FIRMWARE("radeon/RV730_me.bin");
63MODULE_FIRMWARE("radeon/RV710_pfp.bin"); 65MODULE_FIRMWARE("radeon/RV710_pfp.bin");
64MODULE_FIRMWARE("radeon/RV710_me.bin"); 66MODULE_FIRMWARE("radeon/RV710_me.bin");
67MODULE_FIRMWARE("radeon/R600_rlc.bin");
68MODULE_FIRMWARE("radeon/R700_rlc.bin");
65 69
66int r600_debugfs_mc_info_init(struct radeon_device *rdev); 70int r600_debugfs_mc_info_init(struct radeon_device *rdev);
67 71
@@ -70,6 +74,281 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
70void r600_gpu_init(struct radeon_device *rdev); 74void r600_gpu_init(struct radeon_device *rdev);
71void r600_fini(struct radeon_device *rdev); 75void r600_fini(struct radeon_device *rdev);
72 76
77/* hpd for digital panel detect/disconnect */
78bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
79{
80 bool connected = false;
81
82 if (ASIC_IS_DCE3(rdev)) {
83 switch (hpd) {
84 case RADEON_HPD_1:
85 if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
86 connected = true;
87 break;
88 case RADEON_HPD_2:
89 if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
90 connected = true;
91 break;
92 case RADEON_HPD_3:
93 if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
94 connected = true;
95 break;
96 case RADEON_HPD_4:
97 if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
98 connected = true;
99 break;
100 /* DCE 3.2 */
101 case RADEON_HPD_5:
102 if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
103 connected = true;
104 break;
105 case RADEON_HPD_6:
106 if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
107 connected = true;
108 break;
109 default:
110 break;
111 }
112 } else {
113 switch (hpd) {
114 case RADEON_HPD_1:
115 if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
116 connected = true;
117 break;
118 case RADEON_HPD_2:
119 if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
120 connected = true;
121 break;
122 case RADEON_HPD_3:
123 if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
124 connected = true;
125 break;
126 default:
127 break;
128 }
129 }
130 return connected;
131}
132
133void r600_hpd_set_polarity(struct radeon_device *rdev,
134 enum radeon_hpd_id hpd)
135{
136 u32 tmp;
137 bool connected = r600_hpd_sense(rdev, hpd);
138
139 if (ASIC_IS_DCE3(rdev)) {
140 switch (hpd) {
141 case RADEON_HPD_1:
142 tmp = RREG32(DC_HPD1_INT_CONTROL);
143 if (connected)
144 tmp &= ~DC_HPDx_INT_POLARITY;
145 else
146 tmp |= DC_HPDx_INT_POLARITY;
147 WREG32(DC_HPD1_INT_CONTROL, tmp);
148 break;
149 case RADEON_HPD_2:
150 tmp = RREG32(DC_HPD2_INT_CONTROL);
151 if (connected)
152 tmp &= ~DC_HPDx_INT_POLARITY;
153 else
154 tmp |= DC_HPDx_INT_POLARITY;
155 WREG32(DC_HPD2_INT_CONTROL, tmp);
156 break;
157 case RADEON_HPD_3:
158 tmp = RREG32(DC_HPD3_INT_CONTROL);
159 if (connected)
160 tmp &= ~DC_HPDx_INT_POLARITY;
161 else
162 tmp |= DC_HPDx_INT_POLARITY;
163 WREG32(DC_HPD3_INT_CONTROL, tmp);
164 break;
165 case RADEON_HPD_4:
166 tmp = RREG32(DC_HPD4_INT_CONTROL);
167 if (connected)
168 tmp &= ~DC_HPDx_INT_POLARITY;
169 else
170 tmp |= DC_HPDx_INT_POLARITY;
171 WREG32(DC_HPD4_INT_CONTROL, tmp);
172 break;
173 case RADEON_HPD_5:
174 tmp = RREG32(DC_HPD5_INT_CONTROL);
175 if (connected)
176 tmp &= ~DC_HPDx_INT_POLARITY;
177 else
178 tmp |= DC_HPDx_INT_POLARITY;
179 WREG32(DC_HPD5_INT_CONTROL, tmp);
180 break;
181 /* DCE 3.2 */
182 case RADEON_HPD_6:
183 tmp = RREG32(DC_HPD6_INT_CONTROL);
184 if (connected)
185 tmp &= ~DC_HPDx_INT_POLARITY;
186 else
187 tmp |= DC_HPDx_INT_POLARITY;
188 WREG32(DC_HPD6_INT_CONTROL, tmp);
189 break;
190 default:
191 break;
192 }
193 } else {
194 switch (hpd) {
195 case RADEON_HPD_1:
196 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
197 if (connected)
198 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
199 else
200 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
201 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
202 break;
203 case RADEON_HPD_2:
204 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
205 if (connected)
206 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
207 else
208 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
209 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
210 break;
211 case RADEON_HPD_3:
212 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
213 if (connected)
214 tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
215 else
216 tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
217 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
218 break;
219 default:
220 break;
221 }
222 }
223}
224
225void r600_hpd_init(struct radeon_device *rdev)
226{
227 struct drm_device *dev = rdev->ddev;
228 struct drm_connector *connector;
229
230 if (ASIC_IS_DCE3(rdev)) {
231 u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
232 if (ASIC_IS_DCE32(rdev))
233 tmp |= DC_HPDx_EN;
234
235 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
236 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
237 switch (radeon_connector->hpd.hpd) {
238 case RADEON_HPD_1:
239 WREG32(DC_HPD1_CONTROL, tmp);
240 rdev->irq.hpd[0] = true;
241 break;
242 case RADEON_HPD_2:
243 WREG32(DC_HPD2_CONTROL, tmp);
244 rdev->irq.hpd[1] = true;
245 break;
246 case RADEON_HPD_3:
247 WREG32(DC_HPD3_CONTROL, tmp);
248 rdev->irq.hpd[2] = true;
249 break;
250 case RADEON_HPD_4:
251 WREG32(DC_HPD4_CONTROL, tmp);
252 rdev->irq.hpd[3] = true;
253 break;
254 /* DCE 3.2 */
255 case RADEON_HPD_5:
256 WREG32(DC_HPD5_CONTROL, tmp);
257 rdev->irq.hpd[4] = true;
258 break;
259 case RADEON_HPD_6:
260 WREG32(DC_HPD6_CONTROL, tmp);
261 rdev->irq.hpd[5] = true;
262 break;
263 default:
264 break;
265 }
266 }
267 } else {
268 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
269 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
270 switch (radeon_connector->hpd.hpd) {
271 case RADEON_HPD_1:
272 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
273 rdev->irq.hpd[0] = true;
274 break;
275 case RADEON_HPD_2:
276 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
277 rdev->irq.hpd[1] = true;
278 break;
279 case RADEON_HPD_3:
280 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
281 rdev->irq.hpd[2] = true;
282 break;
283 default:
284 break;
285 }
286 }
287 }
288 r600_irq_set(rdev);
289}
290
291void r600_hpd_fini(struct radeon_device *rdev)
292{
293 struct drm_device *dev = rdev->ddev;
294 struct drm_connector *connector;
295
296 if (ASIC_IS_DCE3(rdev)) {
297 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
298 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
299 switch (radeon_connector->hpd.hpd) {
300 case RADEON_HPD_1:
301 WREG32(DC_HPD1_CONTROL, 0);
302 rdev->irq.hpd[0] = false;
303 break;
304 case RADEON_HPD_2:
305 WREG32(DC_HPD2_CONTROL, 0);
306 rdev->irq.hpd[1] = false;
307 break;
308 case RADEON_HPD_3:
309 WREG32(DC_HPD3_CONTROL, 0);
310 rdev->irq.hpd[2] = false;
311 break;
312 case RADEON_HPD_4:
313 WREG32(DC_HPD4_CONTROL, 0);
314 rdev->irq.hpd[3] = false;
315 break;
316 /* DCE 3.2 */
317 case RADEON_HPD_5:
318 WREG32(DC_HPD5_CONTROL, 0);
319 rdev->irq.hpd[4] = false;
320 break;
321 case RADEON_HPD_6:
322 WREG32(DC_HPD6_CONTROL, 0);
323 rdev->irq.hpd[5] = false;
324 break;
325 default:
326 break;
327 }
328 }
329 } else {
330 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
331 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
332 switch (radeon_connector->hpd.hpd) {
333 case RADEON_HPD_1:
334 WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
335 rdev->irq.hpd[0] = false;
336 break;
337 case RADEON_HPD_2:
338 WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
339 rdev->irq.hpd[1] = false;
340 break;
341 case RADEON_HPD_3:
342 WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
343 rdev->irq.hpd[2] = false;
344 break;
345 default:
346 break;
347 }
348 }
349 }
350}
351
73/* 352/*
74 * R600 PCIE GART 353 * R600 PCIE GART
75 */ 354 */
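The three functions above give KMS a polled sense bit plus per-pin interrupt arming for digital-output hotplug. A hedged sketch of how a connector detect path could consume them (the wrapper below is illustrative, not part of this patch):

	/* sketch: poll the HPD pin, then re-arm the interrupt polarity so
	 * the next state change raises an IH hotplug event */
	static enum drm_connector_status
	r600_detect_sketch(struct radeon_device *rdev, enum radeon_hpd_id hpd)
	{
		bool connected = r600_hpd_sense(rdev, hpd);

		r600_hpd_set_polarity(rdev, hpd);
		return connected ? connector_status_connected :
				   connector_status_disconnected;
	}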
@@ -180,7 +459,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
180void r600_pcie_gart_disable(struct radeon_device *rdev) 459void r600_pcie_gart_disable(struct radeon_device *rdev)
181{ 460{
182 u32 tmp; 461 u32 tmp;
183 int i; 462 int i, r;
184 463
185 /* Disable all tables */ 464 /* Disable all tables */
186 for (i = 0; i < 7; i++) 465 for (i = 0; i < 7; i++)
@@ -208,8 +487,12 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
208 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); 487 WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
209 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); 488 WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
210 if (rdev->gart.table.vram.robj) { 489 if (rdev->gart.table.vram.robj) {
211 radeon_object_kunmap(rdev->gart.table.vram.robj); 490 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
212 radeon_object_unpin(rdev->gart.table.vram.robj); 491 if (likely(r == 0)) {
492 radeon_bo_kunmap(rdev->gart.table.vram.robj);
493 radeon_bo_unpin(rdev->gart.table.vram.robj);
494 radeon_bo_unreserve(rdev->gart.table.vram.robj);
495 }
213 } 496 }
214} 497}
215 498
@@ -1101,6 +1384,10 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1101 (void)RREG32(PCIE_PORT_DATA); 1384 (void)RREG32(PCIE_PORT_DATA);
1102} 1385}
1103 1386
1387void r600_hdp_flush(struct radeon_device *rdev)
1388{
1389 WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
1390}
1104 1391
1105/* 1392/*
1106 * CP & Ring 1393 * CP & Ring
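r600_hdp_flush() added above flushes the Host Data Path cache so CPU writes that reached memory through HDP become visible to the GPU. A hedged usage sketch (the write-back buffer, index and value are placeholders):

	/* sketch: publish a CPU-written dword before the GPU reads it */
	rdev->wb.wb[index] = cpu_to_le32(value);
	r600_hdp_flush(rdev);	/* flush the HDP cache */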
@@ -1110,11 +1397,12 @@ void r600_cp_stop(struct radeon_device *rdev)
1110 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1397 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1111} 1398}
1112 1399
1113int r600_cp_init_microcode(struct radeon_device *rdev) 1400int r600_init_microcode(struct radeon_device *rdev)
1114{ 1401{
1115 struct platform_device *pdev; 1402 struct platform_device *pdev;
1116 const char *chip_name; 1403 const char *chip_name;
1117 size_t pfp_req_size, me_req_size; 1404 const char *rlc_chip_name;
1405 size_t pfp_req_size, me_req_size, rlc_req_size;
1118 char fw_name[30]; 1406 char fw_name[30];
1119 int err; 1407 int err;
1120 1408
@@ -1128,30 +1416,62 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1128 } 1416 }
1129 1417
1130 switch (rdev->family) { 1418 switch (rdev->family) {
1131 case CHIP_R600: chip_name = "R600"; break; 1419 case CHIP_R600:
1132 case CHIP_RV610: chip_name = "RV610"; break; 1420 chip_name = "R600";
1133 case CHIP_RV630: chip_name = "RV630"; break; 1421 rlc_chip_name = "R600";
1134 case CHIP_RV620: chip_name = "RV620"; break; 1422 break;
1135 case CHIP_RV635: chip_name = "RV635"; break; 1423 case CHIP_RV610:
1136 case CHIP_RV670: chip_name = "RV670"; break; 1424 chip_name = "RV610";
1425 rlc_chip_name = "R600";
1426 break;
1427 case CHIP_RV630:
1428 chip_name = "RV630";
1429 rlc_chip_name = "R600";
1430 break;
1431 case CHIP_RV620:
1432 chip_name = "RV620";
1433 rlc_chip_name = "R600";
1434 break;
1435 case CHIP_RV635:
1436 chip_name = "RV635";
1437 rlc_chip_name = "R600";
1438 break;
1439 case CHIP_RV670:
1440 chip_name = "RV670";
1441 rlc_chip_name = "R600";
1442 break;
1137 case CHIP_RS780: 1443 case CHIP_RS780:
1138 case CHIP_RS880: chip_name = "RS780"; break; 1444 case CHIP_RS880:
1139 case CHIP_RV770: chip_name = "RV770"; break; 1445 chip_name = "RS780";
1446 rlc_chip_name = "R600";
1447 break;
1448 case CHIP_RV770:
1449 chip_name = "RV770";
1450 rlc_chip_name = "R700";
1451 break;
1140 case CHIP_RV730: 1452 case CHIP_RV730:
1141 case CHIP_RV740: chip_name = "RV730"; break; 1453 case CHIP_RV740:
1142 case CHIP_RV710: chip_name = "RV710"; break; 1454 chip_name = "RV730";
1455 rlc_chip_name = "R700";
1456 break;
1457 case CHIP_RV710:
1458 chip_name = "RV710";
1459 rlc_chip_name = "R700";
1460 break;
1143 default: BUG(); 1461 default: BUG();
1144 } 1462 }
1145 1463
1146 if (rdev->family >= CHIP_RV770) { 1464 if (rdev->family >= CHIP_RV770) {
1147 pfp_req_size = R700_PFP_UCODE_SIZE * 4; 1465 pfp_req_size = R700_PFP_UCODE_SIZE * 4;
1148 me_req_size = R700_PM4_UCODE_SIZE * 4; 1466 me_req_size = R700_PM4_UCODE_SIZE * 4;
1467 rlc_req_size = R700_RLC_UCODE_SIZE * 4;
1149 } else { 1468 } else {
1150 pfp_req_size = PFP_UCODE_SIZE * 4; 1469 pfp_req_size = PFP_UCODE_SIZE * 4;
1151 me_req_size = PM4_UCODE_SIZE * 12; 1470 me_req_size = PM4_UCODE_SIZE * 12;
1471 rlc_req_size = RLC_UCODE_SIZE * 4;
1152 } 1472 }
1153 1473
1154 DRM_INFO("Loading %s CP Microcode\n", chip_name); 1474 DRM_INFO("Loading %s Microcode\n", chip_name);
1155 1475
1156 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name); 1476 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
1157 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev); 1477 err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
@@ -1175,6 +1495,18 @@ int r600_cp_init_microcode(struct radeon_device *rdev)
1175 rdev->me_fw->size, fw_name); 1495 rdev->me_fw->size, fw_name);
1176 err = -EINVAL; 1496 err = -EINVAL;
1177 } 1497 }
1498
1499 snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
1500 err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
1501 if (err)
1502 goto out;
1503 if (rdev->rlc_fw->size != rlc_req_size) {
1504 printk(KERN_ERR
1505 "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
1506 rdev->rlc_fw->size, fw_name);
1507 err = -EINVAL;
1508 }
1509
1178out: 1510out:
1179 platform_device_unregister(pdev); 1511 platform_device_unregister(pdev);
1180 1512
@@ -1187,6 +1519,8 @@ out:
1187 rdev->pfp_fw = NULL; 1519 rdev->pfp_fw = NULL;
1188 release_firmware(rdev->me_fw); 1520 release_firmware(rdev->me_fw);
1189 rdev->me_fw = NULL; 1521 rdev->me_fw = NULL;
1522 release_firmware(rdev->rlc_fw);
1523 rdev->rlc_fw = NULL;
1190 } 1524 }
1191 return err; 1525 return err;
1192} 1526}
@@ -1381,10 +1715,16 @@ int r600_ring_test(struct radeon_device *rdev)
1381 1715
1382void r600_wb_disable(struct radeon_device *rdev) 1716void r600_wb_disable(struct radeon_device *rdev)
1383{ 1717{
1718 int r;
1719
1384 WREG32(SCRATCH_UMSK, 0); 1720 WREG32(SCRATCH_UMSK, 0);
1385 if (rdev->wb.wb_obj) { 1721 if (rdev->wb.wb_obj) {
1386 radeon_object_kunmap(rdev->wb.wb_obj); 1722 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1387 radeon_object_unpin(rdev->wb.wb_obj); 1723 if (unlikely(r != 0))
1724 return;
1725 radeon_bo_kunmap(rdev->wb.wb_obj);
1726 radeon_bo_unpin(rdev->wb.wb_obj);
1727 radeon_bo_unreserve(rdev->wb.wb_obj);
1388 } 1728 }
1389} 1729}
1390 1730
@@ -1392,7 +1732,7 @@ void r600_wb_fini(struct radeon_device *rdev)
1392{ 1732{
1393 r600_wb_disable(rdev); 1733 r600_wb_disable(rdev);
1394 if (rdev->wb.wb_obj) { 1734 if (rdev->wb.wb_obj) {
1395 radeon_object_unref(&rdev->wb.wb_obj); 1735 radeon_bo_unref(&rdev->wb.wb_obj);
1396 rdev->wb.wb = NULL; 1736 rdev->wb.wb = NULL;
1397 rdev->wb.wb_obj = NULL; 1737 rdev->wb.wb_obj = NULL;
1398 } 1738 }
@@ -1403,22 +1743,29 @@ int r600_wb_enable(struct radeon_device *rdev)
1403 int r; 1743 int r;
1404 1744
1405 if (rdev->wb.wb_obj == NULL) { 1745 if (rdev->wb.wb_obj == NULL) {
1406 r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true, 1746 r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
1407 RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); 1747 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
1408 if (r) { 1748 if (r) {
1409 dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); 1749 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
1410 return r; 1750 return r;
1411 } 1751 }
1412 r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, 1752 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
1753 if (unlikely(r != 0)) {
1754 r600_wb_fini(rdev);
1755 return r;
1756 }
1757 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
1413 &rdev->wb.gpu_addr); 1758 &rdev->wb.gpu_addr);
1414 if (r) { 1759 if (r) {
1415 dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); 1760 radeon_bo_unreserve(rdev->wb.wb_obj);
1761 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
1416 r600_wb_fini(rdev); 1762 r600_wb_fini(rdev);
1417 return r; 1763 return r;
1418 } 1764 }
1419 r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); 1765 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
1766 radeon_bo_unreserve(rdev->wb.wb_obj);
1420 if (r) { 1767 if (r) {
1421 dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); 1768 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
1422 r600_wb_fini(rdev); 1769 r600_wb_fini(rdev);
1423 return r; 1770 return r;
1424 } 1771 }
@@ -1433,10 +1780,14 @@ int r600_wb_enable(struct radeon_device *rdev)
1433void r600_fence_ring_emit(struct radeon_device *rdev, 1780void r600_fence_ring_emit(struct radeon_device *rdev,
1434 struct radeon_fence *fence) 1781 struct radeon_fence *fence)
1435{ 1782{
1783 /* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */
1436 /* Emit fence sequence & fire IRQ */ 1784 /* Emit fence sequence & fire IRQ */
1437 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 1785 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1438 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2)); 1786 radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
1439 radeon_ring_write(rdev, fence->seq); 1787 radeon_ring_write(rdev, fence->seq);
1788 /* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
1789 radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
1790 radeon_ring_write(rdev, RB_INT_STAT);
1440} 1791}
1441 1792
1442int r600_copy_dma(struct radeon_device *rdev, 1793int r600_copy_dma(struct radeon_device *rdev,
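Because the defunct CP_INTERRUPT packet is replaced by a PACKET0 write, a fence emit now costs five ring dwords instead of three; the blit ring-size accounting later in this patch (r600_blit_prepare_copy) changes to match. The count, as read from the emit above:

	/* dword budget of r600_fence_ring_emit() after this change:
	 *   PACKET3(PACKET3_SET_CONFIG_REG, 1) header   1
	 *   scratch register index                      1
	 *   fence->seq                                  1
	 *   PACKET0(CP_INT_STATUS, 0) header            1
	 *   RB_INT_STAT payload                         1
	 *                                        total  5
	 */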
@@ -1459,18 +1810,6 @@ int r600_copy_blit(struct radeon_device *rdev,
1459 return 0; 1810 return 0;
1460} 1811}
1461 1812
1462int r600_irq_process(struct radeon_device *rdev)
1463{
1464 /* FIXME: implement */
1465 return 0;
1466}
1467
1468int r600_irq_set(struct radeon_device *rdev)
1469{
1470 /* FIXME: implement */
1471 return 0;
1472}
1473
1474int r600_set_surface_reg(struct radeon_device *rdev, int reg, 1813int r600_set_surface_reg(struct radeon_device *rdev, int reg,
1475 uint32_t tiling_flags, uint32_t pitch, 1814 uint32_t tiling_flags, uint32_t pitch,
1476 uint32_t offset, uint32_t obj_size) 1815 uint32_t offset, uint32_t obj_size)
@@ -1506,6 +1845,14 @@ int r600_startup(struct radeon_device *rdev)
1506{ 1845{
1507 int r; 1846 int r;
1508 1847
1848 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1849 r = r600_init_microcode(rdev);
1850 if (r) {
1851 DRM_ERROR("Failed to load firmware!\n");
1852 return r;
1853 }
1854 }
1855
1509 r600_mc_program(rdev); 1856 r600_mc_program(rdev);
1510 if (rdev->flags & RADEON_IS_AGP) { 1857 if (rdev->flags & RADEON_IS_AGP) {
1511 r600_agp_enable(rdev); 1858 r600_agp_enable(rdev);
@@ -1516,13 +1863,26 @@ int r600_startup(struct radeon_device *rdev)
1516 } 1863 }
1517 r600_gpu_init(rdev); 1864 r600_gpu_init(rdev);
1518 1865
1519 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 1866 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1520 &rdev->r600_blit.shader_gpu_addr); 1867 if (unlikely(r != 0))
1868 return r;
1869 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
1870 &rdev->r600_blit.shader_gpu_addr);
1871 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1521 if (r) { 1872 if (r) {
1522 DRM_ERROR("failed to pin blit object %d\n", r); 1873 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
1523 return r; 1874 return r;
1524 } 1875 }
1525 1876
1877 /* Enable IRQ */
1878 r = r600_irq_init(rdev);
1879 if (r) {
1880 DRM_ERROR("radeon: IH init failed (%d).\n", r);
1881 radeon_irq_kms_fini(rdev);
1882 return r;
1883 }
1884 r600_irq_set(rdev);
1885
1526 r = radeon_ring_init(rdev, rdev->cp.ring_size); 1886 r = radeon_ring_init(rdev, rdev->cp.ring_size);
1527 if (r) 1887 if (r)
1528 return r; 1888 return r;
@@ -1583,13 +1943,19 @@ int r600_resume(struct radeon_device *rdev)
1583 1943
1584int r600_suspend(struct radeon_device *rdev) 1944int r600_suspend(struct radeon_device *rdev)
1585{ 1945{
1946 int r;
1947
1586 /* FIXME: we should wait for ring to be empty */ 1948 /* FIXME: we should wait for ring to be empty */
1587 r600_cp_stop(rdev); 1949 r600_cp_stop(rdev);
1588 rdev->cp.ready = false; 1950 rdev->cp.ready = false;
1589 r600_wb_disable(rdev); 1951 r600_wb_disable(rdev);
1590 r600_pcie_gart_disable(rdev); 1952 r600_pcie_gart_disable(rdev);
1591 /* unpin shaders bo */ 1953 /* unpin shaders bo */
1592 radeon_object_unpin(rdev->r600_blit.shader_obj); 1954 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1955 if (unlikely(r != 0))
1956 return r;
1957 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1958 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1593 return 0; 1959 return 0;
1594} 1960}
1595 1961
@@ -1627,7 +1993,11 @@ int r600_init(struct radeon_device *rdev)
1627 if (r) 1993 if (r)
1628 return r; 1994 return r;
1629 /* Post card if necessary */ 1995 /* Post card if necessary */
1630 if (!r600_card_posted(rdev) && rdev->bios) { 1996 if (!r600_card_posted(rdev)) {
1997 if (!rdev->bios) {
1998 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
1999 return -EINVAL;
2000 }
1631 DRM_INFO("GPU not posted. posting now...\n"); 2001 DRM_INFO("GPU not posted. posting now...\n");
1632 atom_asic_init(rdev->mode_info.atom_context); 2002 atom_asic_init(rdev->mode_info.atom_context);
1633 } 2003 }
@@ -1650,31 +2020,31 @@ int r600_init(struct radeon_device *rdev)
1650 if (r) 2020 if (r)
1651 return r; 2021 return r;
1652 /* Memory manager */ 2022 /* Memory manager */
1653 r = radeon_object_init(rdev); 2023 r = radeon_bo_init(rdev);
1654 if (r) 2024 if (r)
1655 return r; 2025 return r;
2026
2027 r = radeon_irq_kms_init(rdev);
2028 if (r)
2029 return r;
2030
1656 rdev->cp.ring_obj = NULL; 2031 rdev->cp.ring_obj = NULL;
1657 r600_ring_init(rdev, 1024 * 1024); 2032 r600_ring_init(rdev, 1024 * 1024);
1658 2033
1659 if (!rdev->me_fw || !rdev->pfp_fw) { 2034 rdev->ih.ring_obj = NULL;
1660 r = r600_cp_init_microcode(rdev); 2035 r600_ih_ring_init(rdev, 64 * 1024);
1661 if (r) {
1662 DRM_ERROR("Failed to load firmware!\n");
1663 return r;
1664 }
1665 }
1666 2036
1667 r = r600_pcie_gart_init(rdev); 2037 r = r600_pcie_gart_init(rdev);
1668 if (r) 2038 if (r)
1669 return r; 2039 return r;
1670 2040
1671 rdev->accel_working = true;
1672 r = r600_blit_init(rdev); 2041 r = r600_blit_init(rdev);
1673 if (r) { 2042 if (r) {
1674 DRM_ERROR("radeon: failled blitter (%d).\n", r); 2043 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1675 return r; 2044 return r;
1676 } 2045 }
1677 2046
2047 rdev->accel_working = true;
1678 r = r600_startup(rdev); 2048 r = r600_startup(rdev);
1679 if (r) { 2049 if (r) {
1680 r600_suspend(rdev); 2050 r600_suspend(rdev);
@@ -1686,12 +2056,12 @@ int r600_init(struct radeon_device *rdev)
1686 if (rdev->accel_working) { 2056 if (rdev->accel_working) {
1687 r = radeon_ib_pool_init(rdev); 2057 r = radeon_ib_pool_init(rdev);
1688 if (r) { 2058 if (r) {
1689 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 2059 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1690 rdev->accel_working = false; 2060 rdev->accel_working = false;
1691 } 2061 }
1692 r = r600_ib_test(rdev); 2062 r = r600_ib_test(rdev);
1693 if (r) { 2063 if (r) {
1694 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 2064 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1695 rdev->accel_working = false; 2065 rdev->accel_working = false;
1696 } 2066 }
1697 } 2067 }
@@ -1704,6 +2074,8 @@ void r600_fini(struct radeon_device *rdev)
1704 r600_suspend(rdev); 2074 r600_suspend(rdev);
1705 2075
1706 r600_blit_fini(rdev); 2076 r600_blit_fini(rdev);
2077 r600_irq_fini(rdev);
2078 radeon_irq_kms_fini(rdev);
1707 radeon_ring_fini(rdev); 2079 radeon_ring_fini(rdev);
1708 r600_wb_fini(rdev); 2080 r600_wb_fini(rdev);
1709 r600_pcie_gart_fini(rdev); 2081 r600_pcie_gart_fini(rdev);
@@ -1712,7 +2084,7 @@ void r600_fini(struct radeon_device *rdev)
1712 radeon_clocks_fini(rdev); 2084 radeon_clocks_fini(rdev);
1713 if (rdev->flags & RADEON_IS_AGP) 2085 if (rdev->flags & RADEON_IS_AGP)
1714 radeon_agp_fini(rdev); 2086 radeon_agp_fini(rdev);
1715 radeon_object_fini(rdev); 2087 radeon_bo_fini(rdev);
1716 radeon_atombios_fini(rdev); 2088 radeon_atombios_fini(rdev);
1717 kfree(rdev->bios); 2089 kfree(rdev->bios);
1718 rdev->bios = NULL; 2090 rdev->bios = NULL;
@@ -1798,8 +2170,657 @@ int r600_ib_test(struct radeon_device *rdev)
1798 return r; 2170 return r;
1799} 2171}
1800 2172
2173/*
2174 * Interrupts
2175 *
2176 * Interrupts use a ring buffer on r6xx/r7xx hardware. It works much
2177 * the same as the CP ring buffer, but in reverse. Rather than the CPU
2178 * writing to the ring and the GPU consuming, the GPU writes to the ring
2179 * and host consumes. As the host irq handler processes interrupts, it
2180 * increments the rptr. When the rptr catches up with the wptr, all the
2181 * current interrupts have been processed.
2182 */
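/* Illustrative consume loop for the model described above (not part of
 * the patch; handle_iv_entry() is a name invented here, the real
 * implementation being r600_irq_process() further down):
 *
 *	u32 wptr = r600_get_ih_wptr(rdev);
 *
 *	while (rdev->ih.rptr != wptr) {
 *		handle_iv_entry(&rdev->ih.ring[rdev->ih.rptr / 4]);
 *		rdev->ih.rptr += 16;			// one 128-bit entry
 *		rdev->ih.rptr %= rdev->ih.ring_size;	// wrap
 *	}
 *	WREG32(IH_RB_RPTR, rdev->ih.rptr);	// tell the GPU we caught up
 */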
2183
2184void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
2185{
2186 u32 rb_bufsz;
2187
2188 /* Align ring size */
2189 rb_bufsz = drm_order(ring_size / 4);
2190 ring_size = (1 << rb_bufsz) * 4;
2191 rdev->ih.ring_size = ring_size;
2192 rdev->ih.align_mask = 4 - 1;
2193}
2194
2195static int r600_ih_ring_alloc(struct radeon_device *rdev, unsigned ring_size)
2196{
2197 int r;
2198
2199 rdev->ih.ring_size = ring_size;
2200 /* Allocate ring buffer */
2201 if (rdev->ih.ring_obj == NULL) {
2202 r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
2203 true,
2204 RADEON_GEM_DOMAIN_GTT,
2205 &rdev->ih.ring_obj);
2206 if (r) {
2207 DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
2208 return r;
2209 }
2210 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2211 if (unlikely(r != 0))
2212 return r;
2213 r = radeon_bo_pin(rdev->ih.ring_obj,
2214 RADEON_GEM_DOMAIN_GTT,
2215 &rdev->ih.gpu_addr);
2216 if (r) {
2217 radeon_bo_unreserve(rdev->ih.ring_obj);
2218 DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
2219 return r;
2220 }
2221 r = radeon_bo_kmap(rdev->ih.ring_obj,
2222 (void **)&rdev->ih.ring);
2223 radeon_bo_unreserve(rdev->ih.ring_obj);
2224 if (r) {
2225 DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
2226 return r;
2227 }
2228 }
2229 rdev->ih.ptr_mask = (rdev->ih.ring_size / 4) - 1;
2230 rdev->ih.rptr = 0;
2231
2232 return 0;
2233}
2234
2235static void r600_ih_ring_fini(struct radeon_device *rdev)
2236{
2237 int r;
2238 if (rdev->ih.ring_obj) {
2239 r = radeon_bo_reserve(rdev->ih.ring_obj, false);
2240 if (likely(r == 0)) {
2241 radeon_bo_kunmap(rdev->ih.ring_obj);
2242 radeon_bo_unpin(rdev->ih.ring_obj);
2243 radeon_bo_unreserve(rdev->ih.ring_obj);
2244 }
2245 radeon_bo_unref(&rdev->ih.ring_obj);
2246 rdev->ih.ring = NULL;
2247 rdev->ih.ring_obj = NULL;
2248 }
2249}
2250
2251static void r600_rlc_stop(struct radeon_device *rdev)
2252{
2253
2254 if (rdev->family >= CHIP_RV770) {
2255 /* r7xx asics need to soft reset RLC before halting */
2256 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2257 RREG32(SRBM_SOFT_RESET);
2258 udelay(15000);
2259 WREG32(SRBM_SOFT_RESET, 0);
2260 RREG32(SRBM_SOFT_RESET);
2261 }
2262
2263 WREG32(RLC_CNTL, 0);
2264}
2265
2266static void r600_rlc_start(struct radeon_device *rdev)
2267{
2268 WREG32(RLC_CNTL, RLC_ENABLE);
2269}
2270
2271static int r600_rlc_init(struct radeon_device *rdev)
2272{
2273 u32 i;
2274 const __be32 *fw_data;
2275
2276 if (!rdev->rlc_fw)
2277 return -EINVAL;
2278
2279 r600_rlc_stop(rdev);
2280
2281 WREG32(RLC_HB_BASE, 0);
2282 WREG32(RLC_HB_CNTL, 0);
2283 WREG32(RLC_HB_RPTR, 0);
2284 WREG32(RLC_HB_WPTR, 0);
2285 WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
2286 WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2287 WREG32(RLC_MC_CNTL, 0);
2288 WREG32(RLC_UCODE_CNTL, 0);
2289
2290 fw_data = (const __be32 *)rdev->rlc_fw->data;
2291 if (rdev->family >= CHIP_RV770) {
2292 for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
2293 WREG32(RLC_UCODE_ADDR, i);
2294 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2295 }
2296 } else {
2297 for (i = 0; i < RLC_UCODE_SIZE; i++) {
2298 WREG32(RLC_UCODE_ADDR, i);
2299 WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2300 }
2301 }
2302 WREG32(RLC_UCODE_ADDR, 0);
2303
2304 r600_rlc_start(rdev);
2305
2306 return 0;
2307}
2308
2309static void r600_enable_interrupts(struct radeon_device *rdev)
2310{
2311 u32 ih_cntl = RREG32(IH_CNTL);
2312 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2313
2314 ih_cntl |= ENABLE_INTR;
2315 ih_rb_cntl |= IH_RB_ENABLE;
2316 WREG32(IH_CNTL, ih_cntl);
2317 WREG32(IH_RB_CNTL, ih_rb_cntl);
2318 rdev->ih.enabled = true;
2319}
2320
2321static void r600_disable_interrupts(struct radeon_device *rdev)
2322{
2323 u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
2324 u32 ih_cntl = RREG32(IH_CNTL);
2325
2326 ih_rb_cntl &= ~IH_RB_ENABLE;
2327 ih_cntl &= ~ENABLE_INTR;
2328 WREG32(IH_RB_CNTL, ih_rb_cntl);
2329 WREG32(IH_CNTL, ih_cntl);
2330 /* set rptr, wptr to 0 */
2331 WREG32(IH_RB_RPTR, 0);
2332 WREG32(IH_RB_WPTR, 0);
2333 rdev->ih.enabled = false;
2334 rdev->ih.wptr = 0;
2335 rdev->ih.rptr = 0;
2336}
2337
2338static void r600_disable_interrupt_state(struct radeon_device *rdev)
2339{
2340 u32 tmp;
2341
2342 WREG32(CP_INT_CNTL, 0);
2343 WREG32(GRBM_INT_CNTL, 0);
2344 WREG32(DxMODE_INT_MASK, 0);
2345 if (ASIC_IS_DCE3(rdev)) {
2346 WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
2347 WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
2348 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2349 WREG32(DC_HPD1_INT_CONTROL, tmp);
2350 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2351 WREG32(DC_HPD2_INT_CONTROL, tmp);
2352 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2353 WREG32(DC_HPD3_INT_CONTROL, tmp);
2354 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2355 WREG32(DC_HPD4_INT_CONTROL, tmp);
2356 if (ASIC_IS_DCE32(rdev)) {
2357 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2358 WREG32(DC_HPD5_INT_CONTROL, tmp);
2359 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
2360 WREG32(DC_HPD6_INT_CONTROL, tmp);
2361 }
2362 } else {
2363 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2364 WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
2365 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2366 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2367 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2368 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2369 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
2370 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2371 }
2372}
2373
2374int r600_irq_init(struct radeon_device *rdev)
2375{
2376 int ret = 0;
2377 int rb_bufsz;
2378 u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
2379
2380 /* allocate ring */
2381 ret = r600_ih_ring_alloc(rdev, rdev->ih.ring_size);
2382 if (ret)
2383 return ret;
2384
2385 /* disable irqs */
2386 r600_disable_interrupts(rdev);
2387
2388 /* init rlc */
2389 ret = r600_rlc_init(rdev);
2390 if (ret) {
2391 r600_ih_ring_fini(rdev);
2392 return ret;
2393 }
2394
2395 /* setup interrupt control */
2396 /* set dummy read address to ring address */
2397 WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
2398 interrupt_cntl = RREG32(INTERRUPT_CNTL);
2399 /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
2400 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
2401 */
2402 interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
2403 /* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
2404 interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
2405 WREG32(INTERRUPT_CNTL, interrupt_cntl);
2406
2407 WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
2408 rb_bufsz = drm_order(rdev->ih.ring_size / 4);
2409
2410 ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
2411 IH_WPTR_OVERFLOW_CLEAR |
2412 (rb_bufsz << 1));
2413 /* WPTR writeback, not yet */
2414 /*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
2415 WREG32(IH_RB_WPTR_ADDR_LO, 0);
2416 WREG32(IH_RB_WPTR_ADDR_HI, 0);
2417
2418 WREG32(IH_RB_CNTL, ih_rb_cntl);
2419
2420 /* set rptr, wptr to 0 */
2421 WREG32(IH_RB_RPTR, 0);
2422 WREG32(IH_RB_WPTR, 0);
2423
2424 /* Default settings for IH_CNTL (disabled at first) */
2425 ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
2426 /* RPTR_REARM only works if msi's are enabled */
2427 if (rdev->msi_enabled)
2428 ih_cntl |= RPTR_REARM;
2429
2430#ifdef __BIG_ENDIAN
2431 ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
2432#endif
2433 WREG32(IH_CNTL, ih_cntl);
2434
2435 /* force the active interrupt state to all disabled */
2436 r600_disable_interrupt_state(rdev);
2437
2438 /* enable irqs */
2439 r600_enable_interrupts(rdev);
2440
2441 return ret;
2442}
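/* Worked example of the size encoding above, for the 64 KiB IH ring set
 * up by r600_init(): 65536 bytes = 16384 dwords, so
 * rb_bufsz = drm_order(16384) = 14, programmed through the IH_IB_SIZE
 * field ((rb_bufsz << 1) in IH_RB_CNTL). */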
2443
2444void r600_irq_fini(struct radeon_device *rdev)
2445{
2446 r600_disable_interrupts(rdev);
2447 r600_rlc_stop(rdev);
2448 r600_ih_ring_fini(rdev);
2449}
2450
2451int r600_irq_set(struct radeon_device *rdev)
2452{
2453 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
2454 u32 mode_int = 0;
2455 u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
2456
2457 /* don't enable anything if the ih is disabled */
2458 if (!rdev->ih.enabled)
2459 return 0;
2460
2461 if (ASIC_IS_DCE3(rdev)) {
2462 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2463 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2464 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2465 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
2466 if (ASIC_IS_DCE32(rdev)) {
2467 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
2468 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
2469 }
2470 } else {
2471 hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
2472 hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
2473 hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2474 }
2475
2476 if (rdev->irq.sw_int) {
2477 DRM_DEBUG("r600_irq_set: sw int\n");
2478 cp_int_cntl |= RB_INT_ENABLE;
2479 }
2480 if (rdev->irq.crtc_vblank_int[0]) {
2481 DRM_DEBUG("r600_irq_set: vblank 0\n");
2482 mode_int |= D1MODE_VBLANK_INT_MASK;
2483 }
2484 if (rdev->irq.crtc_vblank_int[1]) {
2485 DRM_DEBUG("r600_irq_set: vblank 1\n");
2486 mode_int |= D2MODE_VBLANK_INT_MASK;
2487 }
2488 if (rdev->irq.hpd[0]) {
2489 DRM_DEBUG("r600_irq_set: hpd 1\n");
2490 hpd1 |= DC_HPDx_INT_EN;
2491 }
2492 if (rdev->irq.hpd[1]) {
2493 DRM_DEBUG("r600_irq_set: hpd 2\n");
2494 hpd2 |= DC_HPDx_INT_EN;
2495 }
2496 if (rdev->irq.hpd[2]) {
2497 DRM_DEBUG("r600_irq_set: hpd 3\n");
2498 hpd3 |= DC_HPDx_INT_EN;
2499 }
2500 if (rdev->irq.hpd[3]) {
2501 DRM_DEBUG("r600_irq_set: hpd 4\n");
2502 hpd4 |= DC_HPDx_INT_EN;
2503 }
2504 if (rdev->irq.hpd[4]) {
2505 DRM_DEBUG("r600_irq_set: hpd 5\n");
2506 hpd5 |= DC_HPDx_INT_EN;
2507 }
2508 if (rdev->irq.hpd[5]) {
2509 DRM_DEBUG("r600_irq_set: hpd 6\n");
2510 hpd6 |= DC_HPDx_INT_EN;
2511 }
2512
2513 WREG32(CP_INT_CNTL, cp_int_cntl);
2514 WREG32(DxMODE_INT_MASK, mode_int);
2515 if (ASIC_IS_DCE3(rdev)) {
2516 WREG32(DC_HPD1_INT_CONTROL, hpd1);
2517 WREG32(DC_HPD2_INT_CONTROL, hpd2);
2518 WREG32(DC_HPD3_INT_CONTROL, hpd3);
2519 WREG32(DC_HPD4_INT_CONTROL, hpd4);
2520 if (ASIC_IS_DCE32(rdev)) {
2521 WREG32(DC_HPD5_INT_CONTROL, hpd5);
2522 WREG32(DC_HPD6_INT_CONTROL, hpd6);
2523 }
2524 } else {
2525 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
2526 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
2527 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
2528 }
2529
2530 return 0;
2531}
2532
2533static inline void r600_irq_ack(struct radeon_device *rdev,
2534 u32 *disp_int,
2535 u32 *disp_int_cont,
2536 u32 *disp_int_cont2)
2537{
2538 u32 tmp;
2539
2540 if (ASIC_IS_DCE3(rdev)) {
2541 *disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
2542 *disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
2543 *disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
2544 } else {
2545 *disp_int = RREG32(DISP_INTERRUPT_STATUS);
2546 *disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
2547 *disp_int_cont2 = 0;
2548 }
2549
2550 if (*disp_int & LB_D1_VBLANK_INTERRUPT)
2551 WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2552 if (*disp_int & LB_D1_VLINE_INTERRUPT)
2553 WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2554 if (*disp_int & LB_D2_VBLANK_INTERRUPT)
2555 WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
2556 if (*disp_int & LB_D2_VLINE_INTERRUPT)
2557 WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
2558 if (*disp_int & DC_HPD1_INTERRUPT) {
2559 if (ASIC_IS_DCE3(rdev)) {
2560 tmp = RREG32(DC_HPD1_INT_CONTROL);
2561 tmp |= DC_HPDx_INT_ACK;
2562 WREG32(DC_HPD1_INT_CONTROL, tmp);
2563 } else {
2564 tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
2565 tmp |= DC_HPDx_INT_ACK;
2566 WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
2567 }
2568 }
2569 if (*disp_int & DC_HPD2_INTERRUPT) {
2570 if (ASIC_IS_DCE3(rdev)) {
2571 tmp = RREG32(DC_HPD2_INT_CONTROL);
2572 tmp |= DC_HPDx_INT_ACK;
2573 WREG32(DC_HPD2_INT_CONTROL, tmp);
2574 } else {
2575 tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
2576 tmp |= DC_HPDx_INT_ACK;
2577 WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
2578 }
2579 }
2580 if (*disp_int_cont & DC_HPD3_INTERRUPT) {
2581 if (ASIC_IS_DCE3(rdev)) {
2582 tmp = RREG32(DC_HPD3_INT_CONTROL);
2583 tmp |= DC_HPDx_INT_ACK;
2584 WREG32(DC_HPD3_INT_CONTROL, tmp);
2585 } else {
2586 tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
2587 tmp |= DC_HPDx_INT_ACK;
2588 WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
2589 }
2590 }
2591 if (*disp_int_cont & DC_HPD4_INTERRUPT) {
2592 tmp = RREG32(DC_HPD4_INT_CONTROL);
2593 tmp |= DC_HPDx_INT_ACK;
2594 WREG32(DC_HPD4_INT_CONTROL, tmp);
2595 }
2596 if (ASIC_IS_DCE32(rdev)) {
2597 if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
2598 tmp = RREG32(DC_HPD5_INT_CONTROL);
2599 tmp |= DC_HPDx_INT_ACK;
2600 WREG32(DC_HPD5_INT_CONTROL, tmp);
2601 }
2602 if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
2603 tmp = RREG32(DC_HPD6_INT_CONTROL);
2604 tmp |= DC_HPDx_INT_ACK;
2605 WREG32(DC_HPD6_INT_CONTROL, tmp);
2606 }
2607 }
2608}
2609
2610void r600_irq_disable(struct radeon_device *rdev)
2611{
2612 u32 disp_int, disp_int_cont, disp_int_cont2;
2613
2614 r600_disable_interrupts(rdev);
2615 /* Wait and acknowledge irq */
2616 mdelay(1);
2617 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2618 r600_disable_interrupt_state(rdev);
2619}
2620
2621static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
2622{
2623 u32 wptr, tmp;
1801 2624
2625 /* XXX use writeback */
2626 wptr = RREG32(IH_RB_WPTR);
1802 2627
2628 if (wptr & RB_OVERFLOW) {
2629 WARN_ON(1);
2630 /* XXX deal with overflow */
2631 DRM_ERROR("IH RB overflow\n");
2632 tmp = RREG32(IH_RB_CNTL);
2633 tmp |= IH_WPTR_OVERFLOW_CLEAR;
2634 WREG32(IH_RB_CNTL, tmp);
2635 }
2636 wptr = wptr & WPTR_OFFSET_MASK;
2637
2638 return wptr;
2639}
2640
2641/* r600 IV Ring
2642 * Each IV ring entry is 128 bits:
2643 * [7:0] - interrupt source id
2644 * [31:8] - reserved
2645 * [59:32] - interrupt source data
2646 * [127:60] - reserved
2647 *
2648 * The basic interrupt vector entries
2649 * are decoded as follows:
2650 * src_id src_data description
2651 * 1 0 D1 Vblank
2652 * 1 1 D1 Vline
2653 * 5 0 D2 Vblank
2654 * 5 1 D2 Vline
2655 * 19 0 FP Hot plug detection A
2656 * 19 1 FP Hot plug detection B
2657 * 19 2 DAC A auto-detection
2658 * 19 3 DAC B auto-detection
2659 * 176 - CP_INT RB
2660 * 177 - CP_INT IB1
2661 * 178 - CP_INT IB2
2662 * 181 - EOP Interrupt
2663 * 233 - GUI Idle
2664 *
2665 * Note, these are based on r600 and may need to be
2666 * adjusted or added to on newer asics
2667 */
2668
2669int r600_irq_process(struct radeon_device *rdev)
2670{
2671 u32 wptr = r600_get_ih_wptr(rdev);
2672 u32 rptr = rdev->ih.rptr;
2673 u32 src_id, src_data;
2674 u32 last_entry = rdev->ih.ring_size - 16;
2675 u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
2676 unsigned long flags;
2677 bool queue_hotplug = false;
2678
2679 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2680
2681 spin_lock_irqsave(&rdev->ih.lock, flags);
2682
2683 if (rptr == wptr) {
2684 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2685 return IRQ_NONE;
2686 }
2687 if (rdev->shutdown) {
2688 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2689 return IRQ_NONE;
2690 }
2691
2692restart_ih:
2693 /* display interrupts */
2694 r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
2695
2696 rdev->ih.wptr = wptr;
2697 while (rptr != wptr) {
2698 /* wptr/rptr are in bytes! */
2699 ring_index = rptr / 4;
2700 src_id = rdev->ih.ring[ring_index] & 0xff;
2701 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;
2702
2703 switch (src_id) {
2704 case 1: /* D1 vblank/vline */
2705 switch (src_data) {
2706 case 0: /* D1 vblank */
2707 if (disp_int & LB_D1_VBLANK_INTERRUPT) {
2708 drm_handle_vblank(rdev->ddev, 0);
2709 disp_int &= ~LB_D1_VBLANK_INTERRUPT;
2710 DRM_DEBUG("IH: D1 vblank\n");
2711 }
2712 break;
2713 case 1: /* D1 vline */
2714 if (disp_int & LB_D1_VLINE_INTERRUPT) {
2715 disp_int &= ~LB_D1_VLINE_INTERRUPT;
2716 DRM_DEBUG("IH: D1 vline\n");
2717 }
2718 break;
2719 default:
2720 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2721 break;
2722 }
2723 break;
2724 case 5: /* D2 vblank/vline */
2725 switch (src_data) {
2726 case 0: /* D2 vblank */
2727 if (disp_int & LB_D2_VBLANK_INTERRUPT) {
2728 drm_handle_vblank(rdev->ddev, 1);
2729 disp_int &= ~LB_D2_VBLANK_INTERRUPT;
2730 DRM_DEBUG("IH: D2 vblank\n");
2731 }
2732 break;
2733 case 1: /* D2 vline */
2734 if (disp_int & LB_D2_VLINE_INTERRUPT) {
2735 disp_int &= ~LB_D2_VLINE_INTERRUPT;
2736 DRM_DEBUG("IH: D2 vline\n");
2737 }
2738 break;
2739 default:
2740 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2741 break;
2742 }
2743 break;
2744 case 19: /* HPD/DAC hotplug */
2745 switch (src_data) {
2746 case 0:
2747 if (disp_int & DC_HPD1_INTERRUPT) {
2748 disp_int &= ~DC_HPD1_INTERRUPT;
2749 queue_hotplug = true;
2750 DRM_DEBUG("IH: HPD1\n");
2751 }
2752 break;
2753 case 1:
2754 if (disp_int & DC_HPD2_INTERRUPT) {
2755 disp_int &= ~DC_HPD2_INTERRUPT;
2756 queue_hotplug = true;
2757 DRM_DEBUG("IH: HPD2\n");
2758 }
2759 break;
2760 case 4:
2761 if (disp_int_cont & DC_HPD3_INTERRUPT) {
2762 disp_int_cont &= ~DC_HPD3_INTERRUPT;
2763 queue_hotplug = true;
2764 DRM_DEBUG("IH: HPD3\n");
2765 }
2766 break;
2767 case 5:
2768 if (disp_int_cont & DC_HPD4_INTERRUPT) {
2769 disp_int_cont &= ~DC_HPD4_INTERRUPT;
2770 queue_hotplug = true;
2771 DRM_DEBUG("IH: HPD4\n");
2772 }
2773 break;
2774 case 10:
2775 if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
2776 disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
2777 queue_hotplug = true;
2778 DRM_DEBUG("IH: HPD5\n");
2779 }
2780 break;
2781 case 12:
2782 if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
2783 disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
2784 queue_hotplug = true;
2785 DRM_DEBUG("IH: HPD6\n");
2786 }
2787 break;
2788 default:
2789 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2790 break;
2791 }
2792 break;
2793 case 176: /* CP_INT in ring buffer */
2794 case 177: /* CP_INT in IB1 */
2795 case 178: /* CP_INT in IB2 */
2796 DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
2797 radeon_fence_process(rdev);
2798 break;
2799 case 181: /* CP EOP event */
2800 DRM_DEBUG("IH: CP EOP\n");
2801 break;
2802 default:
2803 DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
2804 break;
2805 }
2806
2807 /* wptr/rptr are in bytes! */
2808 if (rptr == last_entry)
2809 rptr = 0;
2810 else
2811 rptr += 16;
2812 }
2813 /* make sure wptr hasn't changed while processing */
2814 wptr = r600_get_ih_wptr(rdev);
2815 if (wptr != rdev->ih.wptr)
2816 goto restart_ih;
2817 if (queue_hotplug)
2818 queue_work(rdev->wq, &rdev->hotplug_work);
2819 rdev->ih.rptr = rptr;
2820 WREG32(IH_RB_RPTR, rdev->ih.rptr);
2821 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2822 return IRQ_HANDLED;
2823}
1803 2824
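/* Sketch of how the IRQ_NONE/IRQ_HANDLED result above is assumed to be
 * consumed (not shown in this patch): the KMS interrupt handler forwards
 * to this function through the asic table, so the value propagates back
 * to the kernel's interrupt core:
 *
 *	irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
 *	{
 *		struct drm_device *dev = (struct drm_device *) arg;
 *		struct radeon_device *rdev = dev->dev_private;
 *
 *		return radeon_irq_process(rdev);  // r600_irq_process here
 *	}
 */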
1804/* 2825/*
1805 * Debugfs info 2826 * Debugfs info
@@ -1811,21 +2832,21 @@ static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
1811 struct drm_info_node *node = (struct drm_info_node *) m->private; 2832 struct drm_info_node *node = (struct drm_info_node *) m->private;
1812 struct drm_device *dev = node->minor->dev; 2833 struct drm_device *dev = node->minor->dev;
1813 struct radeon_device *rdev = dev->dev_private; 2834 struct radeon_device *rdev = dev->dev_private;
1814 uint32_t rdp, wdp;
1815 unsigned count, i, j; 2835 unsigned count, i, j;
1816 2836
1817 radeon_ring_free_size(rdev); 2837 radeon_ring_free_size(rdev);
1818 rdp = RREG32(CP_RB_RPTR); 2838 count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
1819 wdp = RREG32(CP_RB_WPTR);
1820 count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1821 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT)); 2839 seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
1822 seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp); 2840 seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
1823 seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp); 2841 seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
2842 seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
2843 seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
1824 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw); 2844 seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1825 seq_printf(m, "%u dwords in ring\n", count); 2845 seq_printf(m, "%u dwords in ring\n", count);
2846 i = rdev->cp.rptr;
1826 for (j = 0; j <= count; j++) { 2847 for (j = 0; j <= count; j++) {
1827 i = (rdp + j) & rdev->cp.ptr_mask;
1828 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]); 2848 seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
2849 i = (i + 1) & rdev->cp.ptr_mask;
1829 } 2850 }
1830 return 0; 2851 return 0;
1831} 2852}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index dbf716e1fbf3..9aecafb51b66 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
473 obj_size += r6xx_ps_size * 4; 473 obj_size += r6xx_ps_size * 4;
474 obj_size = ALIGN(obj_size, 256); 474 obj_size = ALIGN(obj_size, 256);
475 475
476 r = radeon_object_create(rdev, NULL, obj_size, 476 r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
477 true, RADEON_GEM_DOMAIN_VRAM, 477 &rdev->r600_blit.shader_obj);
478 false, &rdev->r600_blit.shader_obj);
479 if (r) { 478 if (r) {
480 DRM_ERROR("r600 failed to allocate shader\n"); 479 DRM_ERROR("r600 failed to allocate shader\n");
481 return r; 480 return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
485 obj_size, 484 obj_size,
486 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); 485 rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
487 486
488 r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr); 487 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
488 if (unlikely(r != 0))
489 return r;
490 r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
489 if (r) { 491 if (r) {
490 DRM_ERROR("failed to map blit object %d\n", r); 492 DRM_ERROR("failed to map blit object %d\n", r);
491 return r; 493 return r;
492 } 494 }
493
494 if (rdev->family >= CHIP_RV770) 495 if (rdev->family >= CHIP_RV770)
495 memcpy_toio(ptr + rdev->r600_blit.state_offset, 496 memcpy_toio(ptr + rdev->r600_blit.state_offset,
496 r7xx_default_state, rdev->r600_blit.state_len * 4); 497 r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-
-
 	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
 	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
-	radeon_object_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
 void r600_blit_fini(struct radeon_device *rdev)
 {
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
-	radeon_object_unref(&rdev->r600_blit.shader_obj);
+	int r;
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0)) {
+		dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+		goto out_unref;
+	}
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+	radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
 int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 	ring_size = num_loops * dwords_per_loop;
 	/* set default + shaders */
 	ring_size += 40; /* shaders + def state */
-	ring_size += 3; /* fence emit for VB IB */
+	ring_size += 5; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
-	ring_size += 3; /* fence emit for done copy */
+	ring_size += 5; /* fence emit for done copy */
 	r = radeon_ring_lock(rdev, ring_size);
 	WARN_ON(r);
 
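With the radeon_object → radeon_bo rework, a buffer must now be reserved around any kmap or unpin, as the hunks above show. A minimal sketch of that access discipline, assuming only the radeon_bo_* calls this patch introduces (blit_copy_shaders_sketch is a hypothetical helper):

/* Sketch of the reserve/kmap pattern this patch introduces: reserve the bo
 * before mapping or unpinning it, and unreserve it afterwards. */
static int blit_copy_shaders_sketch(struct radeon_device *rdev,
				    const u32 *vs, unsigned vs_dwords)
{
	void *ptr;
	int r;

	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
	if (r == 0) {
		memcpy(ptr + rdev->r600_blit.vs_offset, vs, vs_dwords * 4);
		radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	}
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	return r;
}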
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 27ab428b149b..05894edadab4 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -456,7 +456,215 @@
456#define WAIT_2D_IDLECLEAN_bit (1 << 16) 456#define WAIT_2D_IDLECLEAN_bit (1 << 16)
457#define WAIT_3D_IDLECLEAN_bit (1 << 17) 457#define WAIT_3D_IDLECLEAN_bit (1 << 17)
458 458
459 459#define IH_RB_CNTL 0x3e00
460# define IH_RB_ENABLE (1 << 0)
461# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
462# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
463# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
464# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
465# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
466# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
467#define IH_RB_BASE 0x3e04
468#define IH_RB_RPTR 0x3e08
469#define IH_RB_WPTR 0x3e0c
470# define RB_OVERFLOW (1 << 0)
471# define WPTR_OFFSET_MASK 0x3fffc
472#define IH_RB_WPTR_ADDR_HI 0x3e10
473#define IH_RB_WPTR_ADDR_LO 0x3e14
474#define IH_CNTL 0x3e18
475# define ENABLE_INTR (1 << 0)
476# define IH_MC_SWAP(x) ((x) << 2)
477# define IH_MC_SWAP_NONE 0
478# define IH_MC_SWAP_16BIT 1
479# define IH_MC_SWAP_32BIT 2
480# define IH_MC_SWAP_64BIT 3
481# define RPTR_REARM (1 << 4)
482# define MC_WRREQ_CREDIT(x) ((x) << 15)
483# define MC_WR_CLEAN_CNT(x) ((x) << 20)
484
485#define RLC_CNTL 0x3f00
486# define RLC_ENABLE (1 << 0)
487#define RLC_HB_BASE 0x3f10
488#define RLC_HB_CNTL 0x3f0c
489#define RLC_HB_RPTR 0x3f20
490#define RLC_HB_WPTR 0x3f1c
491#define RLC_HB_WPTR_LSB_ADDR 0x3f14
492#define RLC_HB_WPTR_MSB_ADDR 0x3f18
493#define RLC_MC_CNTL 0x3f44
494#define RLC_UCODE_CNTL 0x3f48
495#define RLC_UCODE_ADDR 0x3f2c
496#define RLC_UCODE_DATA 0x3f30
497
498#define SRBM_SOFT_RESET 0xe60
499# define SOFT_RESET_RLC (1 << 13)
500
501#define CP_INT_CNTL 0xc124
502# define CNTX_BUSY_INT_ENABLE (1 << 19)
503# define CNTX_EMPTY_INT_ENABLE (1 << 20)
504# define SCRATCH_INT_ENABLE (1 << 25)
505# define TIME_STAMP_INT_ENABLE (1 << 26)
506# define IB2_INT_ENABLE (1 << 29)
507# define IB1_INT_ENABLE (1 << 30)
508# define RB_INT_ENABLE (1 << 31)
509#define CP_INT_STATUS 0xc128
510# define SCRATCH_INT_STAT (1 << 25)
511# define TIME_STAMP_INT_STAT (1 << 26)
512# define IB2_INT_STAT (1 << 29)
513# define IB1_INT_STAT (1 << 30)
514# define RB_INT_STAT (1 << 31)
515
516#define GRBM_INT_CNTL 0x8060
517# define RDERR_INT_ENABLE (1 << 0)
518# define WAIT_COUNT_TIMEOUT_INT_ENABLE (1 << 1)
519# define GUI_IDLE_INT_ENABLE (1 << 19)
520
521#define INTERRUPT_CNTL 0x5468
522# define IH_DUMMY_RD_OVERRIDE (1 << 0)
523# define IH_DUMMY_RD_EN (1 << 1)
524# define IH_REQ_NONSNOOP_EN (1 << 3)
525# define GEN_IH_INT_EN (1 << 8)
526#define INTERRUPT_CNTL2 0x546c
527
528#define D1MODE_VBLANK_STATUS 0x6534
529#define D2MODE_VBLANK_STATUS 0x6d34
530# define DxMODE_VBLANK_OCCURRED (1 << 0)
531# define DxMODE_VBLANK_ACK (1 << 4)
532# define DxMODE_VBLANK_STAT (1 << 12)
533# define DxMODE_VBLANK_INTERRUPT (1 << 16)
534# define DxMODE_VBLANK_INTERRUPT_TYPE (1 << 17)
535#define D1MODE_VLINE_STATUS 0x653c
536#define D2MODE_VLINE_STATUS 0x6d3c
537# define DxMODE_VLINE_OCCURRED (1 << 0)
538# define DxMODE_VLINE_ACK (1 << 4)
539# define DxMODE_VLINE_STAT (1 << 12)
540# define DxMODE_VLINE_INTERRUPT (1 << 16)
541# define DxMODE_VLINE_INTERRUPT_TYPE (1 << 17)
542#define DxMODE_INT_MASK 0x6540
543# define D1MODE_VBLANK_INT_MASK (1 << 0)
544# define D1MODE_VLINE_INT_MASK (1 << 4)
545# define D2MODE_VBLANK_INT_MASK (1 << 8)
546# define D2MODE_VLINE_INT_MASK (1 << 12)
547#define DCE3_DISP_INTERRUPT_STATUS 0x7ddc
548# define DC_HPD1_INTERRUPT (1 << 18)
549# define DC_HPD2_INTERRUPT (1 << 19)
550#define DISP_INTERRUPT_STATUS 0x7edc
551# define LB_D1_VLINE_INTERRUPT (1 << 2)
552# define LB_D2_VLINE_INTERRUPT (1 << 3)
553# define LB_D1_VBLANK_INTERRUPT (1 << 4)
554# define LB_D2_VBLANK_INTERRUPT (1 << 5)
555# define DACA_AUTODETECT_INTERRUPT (1 << 16)
556# define DACB_AUTODETECT_INTERRUPT (1 << 17)
557# define DC_HOT_PLUG_DETECT1_INTERRUPT (1 << 18)
558# define DC_HOT_PLUG_DETECT2_INTERRUPT (1 << 19)
559# define DC_I2C_SW_DONE_INTERRUPT (1 << 20)
560# define DC_I2C_HW_DONE_INTERRUPT (1 << 21)
561#define DISP_INTERRUPT_STATUS_CONTINUE 0x7ee8
562#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE 0x7de8
563# define DC_HPD4_INTERRUPT (1 << 14)
564# define DC_HPD4_RX_INTERRUPT (1 << 15)
565# define DC_HPD3_INTERRUPT (1 << 28)
566# define DC_HPD1_RX_INTERRUPT (1 << 29)
567# define DC_HPD2_RX_INTERRUPT (1 << 30)
568#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2 0x7dec
569# define DC_HPD3_RX_INTERRUPT (1 << 0)
570# define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 1)
571# define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 2)
572# define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT (1 << 3)
573# define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT (1 << 4)
574# define AUX1_SW_DONE_INTERRUPT (1 << 5)
575# define AUX1_LS_DONE_INTERRUPT (1 << 6)
576# define AUX2_SW_DONE_INTERRUPT (1 << 7)
577# define AUX2_LS_DONE_INTERRUPT (1 << 8)
578# define AUX3_SW_DONE_INTERRUPT (1 << 9)
579# define AUX3_LS_DONE_INTERRUPT (1 << 10)
580# define AUX4_SW_DONE_INTERRUPT (1 << 11)
581# define AUX4_LS_DONE_INTERRUPT (1 << 12)
582# define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 13)
583# define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT (1 << 14)
584/* DCE 3.2 */
585# define AUX5_SW_DONE_INTERRUPT (1 << 15)
586# define AUX5_LS_DONE_INTERRUPT (1 << 16)
587# define AUX6_SW_DONE_INTERRUPT (1 << 17)
588# define AUX6_LS_DONE_INTERRUPT (1 << 18)
589# define DC_HPD5_INTERRUPT (1 << 19)
590# define DC_HPD5_RX_INTERRUPT (1 << 20)
591# define DC_HPD6_INTERRUPT (1 << 21)
592# define DC_HPD6_RX_INTERRUPT (1 << 22)
593
594#define DACA_AUTO_DETECT_CONTROL 0x7828
595#define DACB_AUTO_DETECT_CONTROL 0x7a28
596#define DCE3_DACA_AUTO_DETECT_CONTROL 0x7028
597#define DCE3_DACB_AUTO_DETECT_CONTROL 0x7128
598# define DACx_AUTODETECT_MODE(x) ((x) << 0)
599# define DACx_AUTODETECT_MODE_NONE 0
600# define DACx_AUTODETECT_MODE_CONNECT 1
601# define DACx_AUTODETECT_MODE_DISCONNECT 2
602# define DACx_AUTODETECT_FRAME_TIME_COUNTER(x) ((x) << 8)
603/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
604# define DACx_AUTODETECT_CHECK_MASK(x) ((x) << 16)
605
606#define DCE3_DACA_AUTODETECT_INT_CONTROL 0x7038
607#define DCE3_DACB_AUTODETECT_INT_CONTROL 0x7138
608#define DACA_AUTODETECT_INT_CONTROL 0x7838
609#define DACB_AUTODETECT_INT_CONTROL 0x7a38
610# define DACx_AUTODETECT_ACK (1 << 0)
611# define DACx_AUTODETECT_INT_ENABLE (1 << 16)
612
613#define DC_HOT_PLUG_DETECT1_CONTROL 0x7d00
614#define DC_HOT_PLUG_DETECT2_CONTROL 0x7d10
615#define DC_HOT_PLUG_DETECT3_CONTROL 0x7d24
616# define DC_HOT_PLUG_DETECTx_EN (1 << 0)
617
618#define DC_HOT_PLUG_DETECT1_INT_STATUS 0x7d04
619#define DC_HOT_PLUG_DETECT2_INT_STATUS 0x7d14
620#define DC_HOT_PLUG_DETECT3_INT_STATUS 0x7d28
621# define DC_HOT_PLUG_DETECTx_INT_STATUS (1 << 0)
622# define DC_HOT_PLUG_DETECTx_SENSE (1 << 1)
623
624/* DCE 3.0 */
625#define DC_HPD1_INT_STATUS 0x7d00
626#define DC_HPD2_INT_STATUS 0x7d0c
627#define DC_HPD3_INT_STATUS 0x7d18
628#define DC_HPD4_INT_STATUS 0x7d24
629/* DCE 3.2 */
630#define DC_HPD5_INT_STATUS 0x7dc0
631#define DC_HPD6_INT_STATUS 0x7df4
632# define DC_HPDx_INT_STATUS (1 << 0)
633# define DC_HPDx_SENSE (1 << 1)
634# define DC_HPDx_RX_INT_STATUS (1 << 8)
635
636#define DC_HOT_PLUG_DETECT1_INT_CONTROL 0x7d08
637#define DC_HOT_PLUG_DETECT2_INT_CONTROL 0x7d18
638#define DC_HOT_PLUG_DETECT3_INT_CONTROL 0x7d2c
639# define DC_HOT_PLUG_DETECTx_INT_ACK (1 << 0)
640# define DC_HOT_PLUG_DETECTx_INT_POLARITY (1 << 8)
641# define DC_HOT_PLUG_DETECTx_INT_EN (1 << 16)
642/* DCE 3.0 */
643#define DC_HPD1_INT_CONTROL 0x7d04
644#define DC_HPD2_INT_CONTROL 0x7d10
645#define DC_HPD3_INT_CONTROL 0x7d1c
646#define DC_HPD4_INT_CONTROL 0x7d28
647/* DCE 3.2 */
648#define DC_HPD5_INT_CONTROL 0x7dc4
649#define DC_HPD6_INT_CONTROL 0x7df8
650# define DC_HPDx_INT_ACK (1 << 0)
651# define DC_HPDx_INT_POLARITY (1 << 8)
652# define DC_HPDx_INT_EN (1 << 16)
653# define DC_HPDx_RX_INT_ACK (1 << 20)
654# define DC_HPDx_RX_INT_EN (1 << 24)
655
656/* DCE 3.0 */
657#define DC_HPD1_CONTROL 0x7d08
658#define DC_HPD2_CONTROL 0x7d14
659#define DC_HPD3_CONTROL 0x7d20
660#define DC_HPD4_CONTROL 0x7d2c
661/* DCE 3.2 */
662#define DC_HPD5_CONTROL 0x7dc8
663#define DC_HPD6_CONTROL 0x7dfc
664# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
665# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
666/* DCE 3.2 */
667# define DC_HPDx_EN (1 << 28)
460 668
461/* 669/*
462 * PM4 670 * PM4
@@ -500,7 +708,6 @@
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_INDIRECT_BUFFER				0x32
-#define	PACKET3_CP_INTERRUPT				0x40
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_TC_ACTION_ENA       (1 << 23)
@@ -674,4 +881,5 @@
 #define S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
 #define S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
 
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL	0x5480
 #endif
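The IH (interrupt handler) block added earlier in this header describes a ring the GPU writes interrupt vectors into; IH_RB_CNTL sizes it in log2 units and can enable write-pointer writeback. A rough sketch of bringing it up using only the defines from this file — the base-address encoding and the minimal sequence are assumptions, not the driver's exact init path:

/* Rough sketch, not the driver's code: the ring size is log2-encoded and
 * the base is assumed to be programmed in 256-byte units. */
static void ih_ring_enable_sketch(struct radeon_device *rdev,
				  u64 gpu_addr, unsigned log2_size)
{
	u32 cntl = IH_WPTR_OVERFLOW_ENABLE |
		   IH_WPTR_WRITEBACK_ENABLE |
		   IH_IB_SIZE(log2_size);

	WREG32(IH_RB_BASE, gpu_addr >> 8);
	WREG32(IH_RB_CNTL, cntl | IH_RB_ENABLE);
}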
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 224506a2f7b1..c938bb54123c 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -28,8 +28,6 @@
 #ifndef __RADEON_H__
 #define __RADEON_H__
 
-#include "radeon_object.h"
-
 /* TODO: Here are things that needs to be done :
  *	- surface allocator & initializer : (bit like scratch reg) should
  *	  initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
 #include "radeon_family.h"
 #include "radeon_mode.h"
 #include "radeon_reg.h"
@@ -85,6 +88,7 @@ extern int radeon_benchmarking;
 extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
+extern int radeon_new_pll;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -186,76 +190,62 @@ void radeon_fence_unref(struct radeon_fence **fence);
  * Tiling registers
  */
 struct radeon_surface_reg {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 };
 
 #define RADEON_GEM_MAX_SURFACES 8
 
 /*
- * Radeon buffer.
+ * TTM.
  */
-struct radeon_object;
+struct radeon_mman {
+	struct ttm_bo_global_ref	bo_global_ref;
+	struct ttm_global_reference	mem_global_ref;
+	bool				mem_global_referenced;
+	struct ttm_bo_device		bdev;
+};
+
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	u32				tiling_flags;
+	u32				pitch;
+	int				surface_reg;
+	/* Constant after initialization */
+	struct radeon_device		*rdev;
+	struct drm_gem_object		*gobj;
+};
 
-struct radeon_object_list {
+struct radeon_bo_list {
 	struct list_head	list;
-	struct radeon_object	*robj;
+	struct radeon_bo	*bo;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
 	unsigned		wdomain;
-	uint32_t		tiling_flags;
+	u32			tiling_flags;
 };
 
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
-			     struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 /*
  * GEM objects.
  */
 struct radeon_gem {
+	struct mutex		mutex;
 	struct list_head	objects;
 };
 
 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
 			     int alignment, int initial_domain,
 			     bool discardable, bool kernel,
-			     bool interruptible,
 			     struct drm_gem_object **obj);
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr);
 void radeon_gem_object_unpin(struct drm_gem_object *obj);
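struct radeon_bo embeds its struct ttm_buffer_object rather than pointing at one, so TTM callbacks that hand back the tbo can recover the wrapper with container_of. A sketch assuming only the layout above (the helper name is illustrative):

/* Illustrative: recover the radeon_bo wrapper from the embedded TTM object,
 * as move/fault-reserve notify callbacks would need to do. container_of
 * works because tbo is a member of struct radeon_bo, not a pointer. */
static inline struct radeon_bo *to_radeon_bo_sketch(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct radeon_bo, tbo);
}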
@@ -271,7 +261,7 @@ struct radeon_gart_table_ram {
 };
 
 struct radeon_gart_table_vram {
-	struct radeon_object		*robj;
+	struct radeon_bo		*robj;
 	volatile uint32_t		*ptr;
 };
 
@@ -352,11 +342,16 @@ struct radeon_irq {
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool		crtc_vblank_int[2];
+	/* FIXME: use defines for max hpd/dacs */
+	bool		hpd[6];
+	spinlock_t	sw_lock;
+	int		sw_refcount;
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
 
 /*
  * CP & ring.
@@ -376,7 +371,7 @@ struct radeon_ib {
  */
 struct radeon_ib_pool {
 	struct mutex		mutex;
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
@@ -384,7 +379,7 @@ struct radeon_ib_pool {
 };
 
 struct radeon_cp {
-	struct radeon_object	*ring_obj;
+	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr;
 	unsigned		wptr;
@@ -399,8 +394,25 @@ struct radeon_cp {
 	bool			ready;
 };
 
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	spinlock_t		lock;
+	bool			enabled;
+};
+
 struct r600_blit {
-	struct radeon_object	*shader_obj;
+	struct radeon_bo	*shader_obj;
 	u64 shader_gpu_addr;
 	u32 vs_offset, ps_offset;
 	u32 state_offset;
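The new r600_ih struct reuses the CP ring bookkeeping: rptr and wptr wrap through the power-of-two ptr_mask. A sketch of advancing past one interrupt vector, assuming the 16-byte (four-dword) vector size used by r6xx-class IH rings:

/* Sketch: step the IH read pointer over one 16-byte interrupt vector,
 * wrapping via the mask kept in the struct above. */
static u32 ih_next_rptr_sketch(struct r600_ih *ih)
{
	return (ih->rptr + 16) & ih->ptr_mask;
}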
@@ -430,8 +442,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
  */
 struct radeon_cs_reloc {
 	struct drm_gem_object		*gobj;
-	struct radeon_object		*robj;
-	struct radeon_object_list	lobj;
+	struct radeon_bo		*robj;
+	struct radeon_bo_list		lobj;
 	uint32_t			handle;
 	uint32_t			flags;
 };
@@ -527,7 +539,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
  * Writeback
  */
 struct radeon_wb {
-	struct radeon_object	*wb_obj;
+	struct radeon_bo	*wb_obj;
 	volatile uint32_t	*wb;
 	uint64_t		gpu_addr;
 };
@@ -639,6 +651,11 @@ struct radeon_asic {
 			       uint32_t offset, uint32_t obj_size);
 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
+	void (*hdp_flush)(struct radeon_device *rdev);
+	void (*hpd_init)(struct radeon_device *rdev);
+	void (*hpd_fini)(struct radeon_device *rdev);
+	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 };
 
 /*
@@ -751,9 +768,9 @@ struct radeon_device {
 	uint8_t				*bios;
 	bool				is_atom_bios;
 	uint16_t			bios_header_start;
-	struct radeon_object		*stollen_vga_memory;
+	struct radeon_bo		*stollen_vga_memory;
 	struct fb_info			*fbdev_info;
-	struct radeon_object		*fbdev_robj;
+	struct radeon_bo		*fbdev_rbo;
 	struct radeon_framebuffer	*fbdev_rfb;
 	/* Register mmio */
 	resource_size_t			rmmio_base;
@@ -791,8 +808,12 @@ struct radeon_device {
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
 	struct r600_blit r600_blit;
 	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct workqueue_struct *wq;
+	struct work_struct hotplug_work;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +850,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
 	}
 }
 
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
 
 /*
  * Registers read & write functions.
@@ -965,18 +990,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
 #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
 
 /* Common functions */
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
 extern int radeon_clocks_init(struct radeon_device *rdev);
 extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
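Besides adding hdp_flush and the hot-plug hooks, the hunk above fixes a real bug: radeon_set_memory_clock() used to dispatch to set_engine_clock. A sketch of how these wrappers expand (the function is illustrative; the vtable pattern is the macros' own):

/* Sketch: the macros are thin vtable dispatches; after the fix the
 * memory-clock wrapper really calls the memory-clock hook. */
static void set_clocks_sketch(struct radeon_device *rdev, u32 eng, u32 mem)
{
	rdev->asic->set_engine_clock(rdev, eng);
	rdev->asic->set_memory_clock(rdev, mem);	/* was set_engine_clock */
}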
@@ -984,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
@@ -1021,7 +1053,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
 extern void r100_vga_render_disable(struct radeon_device *rdev);
 extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 						struct radeon_cs_packet *pkt,
-						struct radeon_object *robj);
+						struct radeon_bo *robj);
 extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 				 struct radeon_cs_packet *pkt,
 				 const unsigned *auth, unsigned n,
@@ -1029,6 +1061,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1138,14 @@ extern void r600_wb_disable(struct radeon_device *rdev);
 extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
 extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
 
 #endif
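Note the include shuffle bracketing this header: radeon_object.h is no longer pulled in at the top (first hunk) but at the bottom (last hunk), because its helpers now need struct radeon_bo and the TTM types defined in between. A sketch of the kind of inline that ordering enables; the offset field is an assumption about this era's ttm_buffer_object:

/* Sketch: with the include moved below the struct definitions,
 * radeon_object.h can define static inlines over the embedded TTM object. */
static inline u64 radeon_bo_gpu_offset_sketch(struct radeon_bo *bo)
{
	return bo->tbo.offset;	/* assumes ttm_buffer_object exposes offset */
}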
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c18fbee387d7..636116bedcb4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
76void r100_bandwidth_update(struct radeon_device *rdev); 76void r100_bandwidth_update(struct radeon_device *rdev);
77void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 77void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
78int r100_ring_test(struct radeon_device *rdev); 78int r100_ring_test(struct radeon_device *rdev);
79void r100_hdp_flush(struct radeon_device *rdev);
80void r100_hpd_init(struct radeon_device *rdev);
81void r100_hpd_fini(struct radeon_device *rdev);
82bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
83void r100_hpd_set_polarity(struct radeon_device *rdev,
84 enum radeon_hpd_id hpd);
79 85
80static struct radeon_asic r100_asic = { 86static struct radeon_asic r100_asic = {
81 .init = &r100_init, 87 .init = &r100_init,
@@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = {
107 .set_surface_reg = r100_set_surface_reg, 113 .set_surface_reg = r100_set_surface_reg,
108 .clear_surface_reg = r100_clear_surface_reg, 114 .clear_surface_reg = r100_clear_surface_reg,
109 .bandwidth_update = &r100_bandwidth_update, 115 .bandwidth_update = &r100_bandwidth_update,
116 .hdp_flush = &r100_hdp_flush,
117 .hpd_init = &r100_hpd_init,
118 .hpd_fini = &r100_hpd_fini,
119 .hpd_sense = &r100_hpd_sense,
120 .hpd_set_polarity = &r100_hpd_set_polarity,
110}; 121};
111 122
112 123
@@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = {
162 .set_surface_reg = r100_set_surface_reg, 173 .set_surface_reg = r100_set_surface_reg,
163 .clear_surface_reg = r100_clear_surface_reg, 174 .clear_surface_reg = r100_clear_surface_reg,
164 .bandwidth_update = &r100_bandwidth_update, 175 .bandwidth_update = &r100_bandwidth_update,
176 .hdp_flush = &r100_hdp_flush,
177 .hpd_init = &r100_hpd_init,
178 .hpd_fini = &r100_hpd_fini,
179 .hpd_sense = &r100_hpd_sense,
180 .hpd_set_polarity = &r100_hpd_set_polarity,
165}; 181};
166 182
167/* 183/*
@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
201 .set_surface_reg = r100_set_surface_reg, 217 .set_surface_reg = r100_set_surface_reg,
202 .clear_surface_reg = r100_clear_surface_reg, 218 .clear_surface_reg = r100_clear_surface_reg,
203 .bandwidth_update = &r100_bandwidth_update, 219 .bandwidth_update = &r100_bandwidth_update,
220 .hdp_flush = &r100_hdp_flush,
221 .hpd_init = &r100_hpd_init,
222 .hpd_fini = &r100_hpd_fini,
223 .hpd_sense = &r100_hpd_sense,
224 .hpd_set_polarity = &r100_hpd_set_polarity,
204}; 225};
205 226
206 227
@@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = {
245 .set_surface_reg = r100_set_surface_reg, 266 .set_surface_reg = r100_set_surface_reg,
246 .clear_surface_reg = r100_clear_surface_reg, 267 .clear_surface_reg = r100_clear_surface_reg,
247 .bandwidth_update = &r100_bandwidth_update, 268 .bandwidth_update = &r100_bandwidth_update,
269 .hdp_flush = &r100_hdp_flush,
270 .hpd_init = &r100_hpd_init,
271 .hpd_fini = &r100_hpd_fini,
272 .hpd_sense = &r100_hpd_sense,
273 .hpd_set_polarity = &r100_hpd_set_polarity,
248}; 274};
249 275
250 276
@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
263uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); 289uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
264void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 290void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
265void rs600_bandwidth_update(struct radeon_device *rdev); 291void rs600_bandwidth_update(struct radeon_device *rdev);
292void rs600_hpd_init(struct radeon_device *rdev);
293void rs600_hpd_fini(struct radeon_device *rdev);
294bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
295void rs600_hpd_set_polarity(struct radeon_device *rdev,
296 enum radeon_hpd_id hpd);
297
266static struct radeon_asic rs600_asic = { 298static struct radeon_asic rs600_asic = {
267 .init = &rs600_init, 299 .init = &rs600_init,
268 .fini = &rs600_fini, 300 .fini = &rs600_fini,
@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
291 .set_pcie_lanes = NULL, 323 .set_pcie_lanes = NULL,
292 .set_clock_gating = &radeon_atom_set_clock_gating, 324 .set_clock_gating = &radeon_atom_set_clock_gating,
293 .bandwidth_update = &rs600_bandwidth_update, 325 .bandwidth_update = &rs600_bandwidth_update,
326 .hdp_flush = &r100_hdp_flush,
327 .hpd_init = &rs600_hpd_init,
328 .hpd_fini = &rs600_hpd_fini,
329 .hpd_sense = &rs600_hpd_sense,
330 .hpd_set_polarity = &rs600_hpd_set_polarity,
294}; 331};
295 332
296 333
@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
334 .set_surface_reg = r100_set_surface_reg, 371 .set_surface_reg = r100_set_surface_reg,
335 .clear_surface_reg = r100_clear_surface_reg, 372 .clear_surface_reg = r100_clear_surface_reg,
336 .bandwidth_update = &rs690_bandwidth_update, 373 .bandwidth_update = &rs690_bandwidth_update,
374 .hdp_flush = &r100_hdp_flush,
375 .hpd_init = &rs600_hpd_init,
376 .hpd_fini = &rs600_hpd_fini,
377 .hpd_sense = &rs600_hpd_sense,
378 .hpd_set_polarity = &rs600_hpd_set_polarity,
337}; 379};
338 380
339 381
@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
381 .set_surface_reg = r100_set_surface_reg, 423 .set_surface_reg = r100_set_surface_reg,
382 .clear_surface_reg = r100_clear_surface_reg, 424 .clear_surface_reg = r100_clear_surface_reg,
383 .bandwidth_update = &rv515_bandwidth_update, 425 .bandwidth_update = &rv515_bandwidth_update,
426 .hdp_flush = &r100_hdp_flush,
427 .hpd_init = &rs600_hpd_init,
428 .hpd_fini = &rs600_hpd_fini,
429 .hpd_sense = &rs600_hpd_sense,
430 .hpd_set_polarity = &rs600_hpd_set_polarity,
384}; 431};
385 432
386 433
@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
419 .set_surface_reg = r100_set_surface_reg, 466 .set_surface_reg = r100_set_surface_reg,
420 .clear_surface_reg = r100_clear_surface_reg, 467 .clear_surface_reg = r100_clear_surface_reg,
421 .bandwidth_update = &rv515_bandwidth_update, 468 .bandwidth_update = &rv515_bandwidth_update,
469 .hdp_flush = &r100_hdp_flush,
470 .hpd_init = &rs600_hpd_init,
471 .hpd_fini = &rs600_hpd_fini,
472 .hpd_sense = &rs600_hpd_sense,
473 .hpd_set_polarity = &rs600_hpd_set_polarity,
422}; 474};
423 475
424/* 476/*
@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
455int r600_copy_blit(struct radeon_device *rdev, 507int r600_copy_blit(struct radeon_device *rdev,
456 uint64_t src_offset, uint64_t dst_offset, 508 uint64_t src_offset, uint64_t dst_offset,
457 unsigned num_pages, struct radeon_fence *fence); 509 unsigned num_pages, struct radeon_fence *fence);
510void r600_hdp_flush(struct radeon_device *rdev);
511void r600_hpd_init(struct radeon_device *rdev);
512void r600_hpd_fini(struct radeon_device *rdev);
513bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
514void r600_hpd_set_polarity(struct radeon_device *rdev,
515 enum radeon_hpd_id hpd);
458 516
459static struct radeon_asic r600_asic = { 517static struct radeon_asic r600_asic = {
460 .init = &r600_init, 518 .init = &r600_init,
@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = {
470 .ring_ib_execute = &r600_ring_ib_execute, 528 .ring_ib_execute = &r600_ring_ib_execute,
471 .irq_set = &r600_irq_set, 529 .irq_set = &r600_irq_set,
472 .irq_process = &r600_irq_process, 530 .irq_process = &r600_irq_process,
531 .get_vblank_counter = &rs600_get_vblank_counter,
473 .fence_ring_emit = &r600_fence_ring_emit, 532 .fence_ring_emit = &r600_fence_ring_emit,
474 .cs_parse = &r600_cs_parse, 533 .cs_parse = &r600_cs_parse,
475 .copy_blit = &r600_copy_blit, 534 .copy_blit = &r600_copy_blit,
@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = {
484 .set_surface_reg = r600_set_surface_reg, 543 .set_surface_reg = r600_set_surface_reg,
485 .clear_surface_reg = r600_clear_surface_reg, 544 .clear_surface_reg = r600_clear_surface_reg,
486 .bandwidth_update = &rv515_bandwidth_update, 545 .bandwidth_update = &rv515_bandwidth_update,
546 .hdp_flush = &r600_hdp_flush,
547 .hpd_init = &r600_hpd_init,
548 .hpd_fini = &r600_hpd_fini,
549 .hpd_sense = &r600_hpd_sense,
550 .hpd_set_polarity = &r600_hpd_set_polarity,
487}; 551};
488 552
489/* 553/*
@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = {
509 .ring_ib_execute = &r600_ring_ib_execute, 573 .ring_ib_execute = &r600_ring_ib_execute,
510 .irq_set = &r600_irq_set, 574 .irq_set = &r600_irq_set,
511 .irq_process = &r600_irq_process, 575 .irq_process = &r600_irq_process,
576 .get_vblank_counter = &rs600_get_vblank_counter,
512 .fence_ring_emit = &r600_fence_ring_emit, 577 .fence_ring_emit = &r600_fence_ring_emit,
513 .cs_parse = &r600_cs_parse, 578 .cs_parse = &r600_cs_parse,
514 .copy_blit = &r600_copy_blit, 579 .copy_blit = &r600_copy_blit,
@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = {
523 .set_surface_reg = r600_set_surface_reg, 588 .set_surface_reg = r600_set_surface_reg,
524 .clear_surface_reg = r600_clear_surface_reg, 589 .clear_surface_reg = r600_clear_surface_reg,
525 .bandwidth_update = &rv515_bandwidth_update, 590 .bandwidth_update = &rv515_bandwidth_update,
591 .hdp_flush = &r600_hdp_flush,
592 .hpd_init = &r600_hpd_init,
593 .hpd_fini = &r600_hpd_fini,
594 .hpd_sense = &r600_hpd_sense,
595 .hpd_set_polarity = &r600_hpd_set_polarity,
526}; 596};
527 597
528#endif 598#endif
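Every ASIC table in this header now fills the same five hot-plug hooks, so common code can probe a pad without knowing the chip family. A sketch of a generic caller using the radeon_hpd_* wrappers from radeon.h (the helper itself is illustrative):

/* Sketch: family-independent hot-plug sense via the new asic hooks. */
static bool connector_plugged_sketch(struct radeon_device *rdev,
				     enum radeon_hpd_id hpd)
{
	if (hpd == RADEON_HPD_NONE)
		return false;
	return radeon_hpd_sense(rdev, hpd);
}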
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2ed88a820935..12a0c760e7ff 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
47 int connector_type, 47 int connector_type,
48 struct radeon_i2c_bus_rec *i2c_bus, 48 struct radeon_i2c_bus_rec *i2c_bus,
49 bool linkb, uint32_t igp_lane_info, 49 bool linkb, uint32_t igp_lane_info,
50 uint16_t connector_object_id); 50 uint16_t connector_object_id,
51 struct radeon_hpd *hpd);
51 52
52/* from radeon_legacy_encoder.c */ 53/* from radeon_legacy_encoder.c */
53extern void 54extern void
@@ -60,16 +61,16 @@ union atom_supported_devices {
 	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
 };
 
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
-							   *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct atom_context *ctx = rdev->mode_info.atom_context;
-	ATOM_GPIO_I2C_ASSIGMENT gpio;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
 	struct radeon_i2c_bus_rec i2c;
 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
 	uint16_t data_offset;
+	int i;
 
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
@@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
 
 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	gpio = i2c_info->asGPIO_Info[id];
 
-	i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
-	i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
-	i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
-	i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
-	i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
-	i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
-	i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
-	i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
-	i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
-	i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
-	i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
-	i2c.put_data_mask = (1 << gpio.ucDataEnShift);
-	i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
-	i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
-	i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
-	i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
-	i2c.valid = true;
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		gpio = &i2c_info->asGPIO_Info[i];
+
+		if (gpio->sucI2cId.ucAccess == id) {
+			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+				i2c.hw_capable = true;
+			else
+				i2c.hw_capable = false;
+
+			if (gpio->sucI2cId.ucAccess == 0xa0)
+				i2c.mm_i2c = true;
+			else
+				i2c.mm_i2c = false;
+
+			i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+			i2c.valid = true;
+		}
+	}
 
 	return i2c;
 }
103 122
123static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
124 u8 id)
125{
126 struct atom_context *ctx = rdev->mode_info.atom_context;
127 struct radeon_gpio_rec gpio;
128 int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
129 struct _ATOM_GPIO_PIN_LUT *gpio_info;
130 ATOM_GPIO_PIN_ASSIGNMENT *pin;
131 u16 data_offset, size;
132 int i, num_indices;
133
134 memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
135 gpio.valid = false;
136
137 atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
138
139 gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
140
141 num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
142
143 for (i = 0; i < num_indices; i++) {
144 pin = &gpio_info->asGPIO_Pin[i];
145 if (id == pin->ucGPIO_ID) {
146 gpio.id = pin->ucGPIO_ID;
147 gpio.reg = pin->usGpioPin_AIndex * 4;
148 gpio.mask = (1 << pin->ucGpioPinBitShift);
149 gpio.valid = true;
150 break;
151 }
152 }
153
154 return gpio;
155}
156
157static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
158 struct radeon_gpio_rec *gpio)
159{
160 struct radeon_hpd hpd;
161 hpd.gpio = *gpio;
162 if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
163 switch(gpio->mask) {
164 case (1 << 0):
165 hpd.hpd = RADEON_HPD_1;
166 break;
167 case (1 << 8):
168 hpd.hpd = RADEON_HPD_2;
169 break;
170 case (1 << 16):
171 hpd.hpd = RADEON_HPD_3;
172 break;
173 case (1 << 24):
174 hpd.hpd = RADEON_HPD_4;
175 break;
176 case (1 << 26):
177 hpd.hpd = RADEON_HPD_5;
178 break;
179 case (1 << 28):
180 hpd.hpd = RADEON_HPD_6;
181 break;
182 default:
183 hpd.hpd = RADEON_HPD_NONE;
184 break;
185 }
186 } else
187 hpd.hpd = RADEON_HPD_NONE;
188 return hpd;
189}
190
 static bool radeon_atom_apply_quirks(struct drm_device *dev,
 				     uint32_t supported_device,
 				     int *connector_type,
 				     struct radeon_i2c_bus_rec *i2c_bus,
-				     uint16_t *line_mux)
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
 {
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7146) &&
+	    (dev->pdev->subsystem_vendor == 0x17af) &&
+	    (dev->pdev->subsystem_device == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7142) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+
 	/* Funky macbooks */
 	if ((dev->pdev->device == 0x71C5) &&
 	    (dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* Acer laptop reports DVI-D as DVI-I */
+	if ((dev->pdev->device == 0x95c4) &&
+	    (dev->pdev->subsystem_vendor == 0x1025) &&
+	    (dev->pdev->subsystem_device == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	return true;
 }
 
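The three new board quirks above all follow one shape: match the PCI device plus subsystem IDs, then veto or rewrite a single reported connector. A sketch of the test, using the HIS X1300 IDs quoted in the hunk (the helper name is made up):

/* Sketch of the quirk predicate pattern used above. */
static bool is_his_x1300_sketch(struct drm_device *dev)
{
	return dev->pdev->device == 0x7146 &&
	       dev->pdev->subsystem_vendor == 0x17af &&
	       dev->pdev->subsystem_device == 0x2058;
}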
@@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	struct atom_context *ctx = mode_info->atom_context;
 	int index = GetIndexIntoMasterTable(DATA, Object_Header);
-	uint16_t size, data_offset;
-	uint8_t frev, crev, line_mux = 0;
+	u16 size, data_offset;
+	u8 frev, crev;
 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
 	ATOM_OBJECT_HEADER *obj_header;
 	int i, j, path_size, device_support;
 	int connector_type;
-	uint16_t igp_lane_info, conn_id, connector_object_id;
+	u16 igp_lane_info, conn_id, connector_object_id;
 	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
 
@@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
 		linkb = false;
-
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 				}
 			}
 
-			/* look up gpio for ddc */
+			/* look up gpio for ddc, hpd */
 			if ((le16_to_cpu(path->usDeviceTag) &
-			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-			    == 0) {
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
 				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
 					if (le16_to_cpu(path->usConnObjectId) ==
 					    le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						asObjects[j].
 						usRecordOffset));
 					ATOM_I2C_RECORD *i2c_record;
+					ATOM_HPD_INT_RECORD *hpd_record;
+					ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+					hpd.hpd = RADEON_HPD_NONE;
 
 					while (record->ucRecordType > 0
 					       && record->
 					       ucRecordType <=
 					       ATOM_MAX_OBJECT_RECORD_NUMBER) {
-						switch (record->
-							ucRecordType) {
+						switch (record->ucRecordType) {
 						case ATOM_I2C_RECORD_TYPE:
 							i2c_record =
-							    (ATOM_I2C_RECORD
-							     *) record;
-							line_mux =
-							    i2c_record->
-							    sucI2cId.
-							    bfI2C_LineMux;
+							    (ATOM_I2C_RECORD *)
+							    record;
+							i2c_config =
+							    (ATOM_I2C_ID_CONFIG_ACCESS *)
+							    &i2c_record->sucI2cId;
+							ddc_bus = radeon_lookup_i2c_gpio(rdev,
+											 i2c_config->
+											 ucAccess);
+							break;
+						case ATOM_HPD_INT_RECORD_TYPE:
+							hpd_record =
+							    (ATOM_HPD_INT_RECORD *)
+							    record;
+							gpio = radeon_lookup_gpio(rdev,
+										  hpd_record->ucHPDIntGPIOID);
+							hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+							hpd.plugged_state = hpd_record->ucPlugged_PinState;
 							break;
 						}
 						record =
@@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						break;
 					}
 				}
-			} else
-				line_mux = 0;
-
-			if ((le16_to_cpu(path->usDeviceTag) ==
-			     ATOM_DEVICE_TV1_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_TV2_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_CV_SUPPORT))
+			} else {
+				hpd.hpd = RADEON_HPD_NONE;
 				ddc_bus.valid = false;
-			else
-				ddc_bus = radeon_lookup_gpio(dev, line_mux);
+			}
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
 			if (!radeon_atom_apply_quirks
 			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
-			     &ddc_bus, &conn_id))
+			     &ddc_bus, &conn_id, &hpd))
 				continue;
 
 			radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						  usDeviceTag),
 						  connector_type, &ddc_bus,
 						  linkb, igp_lane_info,
-						  connector_object_id);
+						  connector_object_id,
+						  &hpd);
 
 		}
 	}
@@ -502,6 +622,7 @@ struct bios_connector {
 	uint16_t devices;
 	int connector_type;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
 };
 
 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	uint16_t device_support;
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
-	int i, j;
+	int i, j, max_device;
 	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
 
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+	for (i = 0; i < max_device; i++) {
 		ATOM_CONNECTOR_INFO_I2C ci =
 		    supported_devices->info.asConnInfo[i];
 
@@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-		if ((rdev->family == CHIP_RS690) ||
-		    (rdev->family == CHIP_RS740)) {
-			if ((i == ATOM_DEVICE_DFP2_INDEX)
-			    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else if ((i == ATOM_DEVICE_DFP3_INDEX)
-				 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-		} else
-			bios_connectors[i].line_mux =
-			    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+		bios_connectors[i].line_mux =
+			ci.sucI2cId.ucAccess;
 
 		/* give tv unique connector ids */
 		if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 			bios_connectors[i].line_mux = 52;
 		} else
 			bios_connectors[i].ddc_bus =
-			    radeon_lookup_gpio(dev,
-					       bios_connectors[i].line_mux);
+			    radeon_lookup_i2c_gpio(rdev,
+						   bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
 
 		/* Always set the connector type to VGA for CRT1/CRT2. if they are
 		 * shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		if (!radeon_atom_apply_quirks
 		    (dev, (1 << i), &bios_connectors[i].connector_type,
-		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
 			continue;
 
 		bios_connectors[i].valid = true;
@@ -617,9 +752,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	}
 
 	/* combine shared connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
-			for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+			for (j = 0; j < max_device; j++) {
 				if (bios_connectors[j].valid && (i != j)) {
 					if (bios_connectors[i].line_mux ==
 					    bios_connectors[j].line_mux) {
@@ -643,6 +778,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
643 bios_connectors[i]. 778 bios_connectors[i].
644 connector_type = 779 connector_type =
645 DRM_MODE_CONNECTOR_DVII; 780 DRM_MODE_CONNECTOR_DVII;
781 if (bios_connectors[j].devices &
782 (ATOM_DEVICE_DFP_SUPPORT))
783 bios_connectors[i].hpd =
784 bios_connectors[j].hpd;
646 bios_connectors[j]. 785 bios_connectors[j].
647 valid = false; 786 valid = false;
648 } 787 }
@@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
653 } 792 }
654 793
655 /* add the connectors */ 794 /* add the connectors */
656 for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) { 795 for (i = 0; i < max_device; i++) {
657 if (bios_connectors[i].valid) { 796 if (bios_connectors[i].valid) {
658 uint16_t connector_object_id = 797 uint16_t connector_object_id =
659 atombios_get_connector_object_id(dev, 798 atombios_get_connector_object_id(dev,
@@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
666 connector_type, 805 connector_type,
667 &bios_connectors[i].ddc_bus, 806 &bios_connectors[i].ddc_bus,
668 false, 0, 807 false, 0,
669 connector_object_id); 808 connector_object_id,
809 &bios_connectors[i].hpd);
670 } 810 }
671 } 811 }
672 812
@@ -731,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
731 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per 871 * pre-DCE 3.0 r6xx hardware. This might need to be adjusted per
732 * family. 872 * family.
733 */ 873 */
734 p1pll->pll_out_min = 64800; 874 if (!radeon_new_pll)
875 p1pll->pll_out_min = 64800;
735 } 876 }
736 877
737 p1pll->pll_in_min = 878 p1pll->pll_in_min =
@@ -861,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
861 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info; 1002 struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
862 uint8_t frev, crev; 1003 uint8_t frev, crev;
863 struct radeon_atom_ss *ss = NULL; 1004 struct radeon_atom_ss *ss = NULL;
1005 int i;
864 1006
865 if (id > ATOM_MAX_SS_ENTRY) 1007 if (id > ATOM_MAX_SS_ENTRY)
866 return NULL; 1008 return NULL;
@@ -878,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
878 if (!ss) 1020 if (!ss)
879 return NULL; 1021 return NULL;
880 1022
881 ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage); 1023 for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
882 ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType; 1024 if (ss_info->asSS_Info[i].ucSS_Id == id) {
883 ss->step = ss_info->asSS_Info[id].ucSS_Step; 1025 ss->percentage =
884 ss->delay = ss_info->asSS_Info[id].ucSS_Delay; 1026 le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
885 ss->range = ss_info->asSS_Info[id].ucSS_Range; 1027 ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
886 ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div; 1028 ss->step = ss_info->asSS_Info[i].ucSS_Step;
1029 ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
1030 ss->range = ss_info->asSS_Info[i].ucSS_Range;
1031 ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
1032 }
1033 }
887 } 1034 }
888 return ss; 1035 return ss;
889} 1036}
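
[editor's note] The hunk above replaces direct indexing of asSS_Info[id] with a scan that matches each entry's ucSS_Id field, since the table is keyed by id rather than by array position. A minimal standalone sketch of that lookup, with the entry struct reduced to the fields it needs (names taken from the hunk; everything else is illustrative):

    /* Id-keyed lookup over a fixed-size BIOS table; assumes
     * <linux/types.h> for u8/u16. */
    struct ss_entry {
            u8  ucSS_Id;
            u16 usSpreadSpectrumPercentage;
    };

    static const struct ss_entry *find_ss_entry(const struct ss_entry *tbl,
                                                int nentries, u8 id)
    {
            int i;

            for (i = 0; i < nentries; i++)
                    if (tbl[i].ucSS_Id == id)
                            return &tbl[i];
            return NULL;    /* no table entry carries this id */
    }
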
@@ -901,7 +1048,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
901 struct radeon_device *rdev = dev->dev_private; 1048 struct radeon_device *rdev = dev->dev_private;
902 struct radeon_mode_info *mode_info = &rdev->mode_info; 1049 struct radeon_mode_info *mode_info = &rdev->mode_info;
903 int index = GetIndexIntoMasterTable(DATA, LVDS_Info); 1050 int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
904 uint16_t data_offset; 1051 uint16_t data_offset, misc;
905 union lvds_info *lvds_info; 1052 union lvds_info *lvds_info;
906 uint8_t frev, crev; 1053 uint8_t frev, crev;
907 struct radeon_encoder_atom_dig *lvds = NULL; 1054 struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1087,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
940 lvds->panel_pwr_delay = 1087 lvds->panel_pwr_delay =
941 le16_to_cpu(lvds_info->info.usOffDelayInMs); 1088 le16_to_cpu(lvds_info->info.usOffDelayInMs);
942 lvds->lvds_misc = lvds_info->info.ucLVDS_Misc; 1089 lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
1090
1091 misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
1092 if (misc & ATOM_VSYNC_POLARITY)
1093 lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
1094 if (misc & ATOM_HSYNC_POLARITY)
1095 lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
1096 if (misc & ATOM_COMPOSITESYNC)
1097 lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
1098 if (misc & ATOM_INTERLACE)
1099 lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
1100 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1101 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
1102
943 /* set crtc values */ 1103 /* set crtc values */
944 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); 1104 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
945 1105
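
[editor's note] The new block above translates the panel's ATOM misc bitfield into DRM mode flags one test at a time. A table-driven equivalent is sketched below; the ATOM_* and DRM_MODE_FLAG_* constants come from the existing headers, while the map and helper are assumptions for illustration, not part of the patch:

    /* Sketch: same translation as the if-chain above, as data. */
    static const struct {
            u16 atom_bit;
            u32 drm_flag;
    } lvds_misc_map[] = {
            { ATOM_VSYNC_POLARITY,    DRM_MODE_FLAG_NVSYNC },
            { ATOM_HSYNC_POLARITY,    DRM_MODE_FLAG_NHSYNC },
            { ATOM_COMPOSITESYNC,     DRM_MODE_FLAG_CSYNC },
            { ATOM_INTERLACE,         DRM_MODE_FLAG_INTERLACE },
            { ATOM_DOUBLE_CLOCK_MODE, DRM_MODE_FLAG_DBLSCAN },
    };

    static void lvds_apply_misc_flags(struct drm_display_mode *mode, u16 misc)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(lvds_misc_map); i++)
                    if (misc & lvds_misc_map[i].atom_bit)
                            mode->flags |= lvds_misc_map[i].drm_flag;
    }
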
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 10bd50a7db87..4ddfd4b5bc51 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -29,8 +29,8 @@
29void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize, 29void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
30 unsigned sdomain, unsigned ddomain) 30 unsigned sdomain, unsigned ddomain)
31{ 31{
32 struct radeon_object *dobj = NULL; 32 struct radeon_bo *dobj = NULL;
33 struct radeon_object *sobj = NULL; 33 struct radeon_bo *sobj = NULL;
34 struct radeon_fence *fence = NULL; 34 struct radeon_fence *fence = NULL;
35 uint64_t saddr, daddr; 35 uint64_t saddr, daddr;
36 unsigned long start_jiffies; 36 unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
41 41
42 size = bsize; 42 size = bsize;
43 n = 1024; 43 n = 1024;
44 r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj); 44 r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
45 if (r) { 45 if (r) {
46 goto out_cleanup; 46 goto out_cleanup;
47 } 47 }
48 r = radeon_object_pin(sobj, sdomain, &saddr); 48 r = radeon_bo_reserve(sobj, false);
49 if (unlikely(r != 0))
50 goto out_cleanup;
51 r = radeon_bo_pin(sobj, sdomain, &saddr);
52 radeon_bo_unreserve(sobj);
49 if (r) { 53 if (r) {
50 goto out_cleanup; 54 goto out_cleanup;
51 } 55 }
52 r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj); 56 r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
53 if (r) { 57 if (r) {
54 goto out_cleanup; 58 goto out_cleanup;
55 } 59 }
56 r = radeon_object_pin(dobj, ddomain, &daddr); 60 r = radeon_bo_reserve(dobj, false);
61 if (unlikely(r != 0))
62 goto out_cleanup;
63 r = radeon_bo_pin(dobj, ddomain, &daddr);
64 radeon_bo_unreserve(dobj);
57 if (r) { 65 if (r) {
58 goto out_cleanup; 66 goto out_cleanup;
59 } 67 }
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
109 } 117 }
110out_cleanup: 118out_cleanup:
111 if (sobj) { 119 if (sobj) {
112 radeon_object_unpin(sobj); 120 r = radeon_bo_reserve(sobj, false);
113 radeon_object_unref(&sobj); 121 if (likely(r == 0)) {
122 radeon_bo_unpin(sobj);
123 radeon_bo_unreserve(sobj);
124 }
125 radeon_bo_unref(&sobj);
114 } 126 }
115 if (dobj) { 127 if (dobj) {
116 radeon_object_unpin(dobj); 128 r = radeon_bo_reserve(dobj, false);
117 radeon_object_unref(&dobj); 129 if (likely(r == 0)) {
130 radeon_bo_unpin(dobj);
131 radeon_bo_unreserve(dobj);
132 }
133 radeon_bo_unref(&dobj);
118 } 134 }
119 if (fence) { 135 if (fence) {
120 radeon_fence_unref(&fence); 136 radeon_fence_unref(&fence);
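
[editor's note] The benchmark conversion above shows the locking protocol of the new radeon_bo API: a buffer must be reserved around pin/unpin, and the cleanup path re-reserves before unpinning. Condensed from the hunks above into a usage sketch (bo, domain and gpu_addr are placeholders):

    /* Pin: reserve, pin, unreserve. */
    r = radeon_bo_reserve(bo, false);
    if (unlikely(r != 0))
            return r;
    r = radeon_bo_pin(bo, domain, &gpu_addr);
    radeon_bo_unreserve(bo);

    /* Teardown: re-reserve before unpin, then drop the reference. */
    r = radeon_bo_reserve(bo, false);
    if (likely(r == 0)) {
            radeon_bo_unpin(bo);
            radeon_bo_unreserve(bo);
    }
    radeon_bo_unref(&bo);
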
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index a81354167621..b062109efbee 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
44 44
45 ref_div = 45 ref_div =
46 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; 46 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
47
48 if (ref_div == 0)
49 return 0;
50
47 sclk = fb_div / ref_div; 51 sclk = fb_div / ref_div;
48 52
49 post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK; 53 post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
70 74
71 ref_div = 75 ref_div =
72 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK; 76 RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
77
78 if (ref_div == 0)
79 return 0;
80
73 mclk = fb_div / ref_div; 81 mclk = fb_div / ref_div;
74 82
75 post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7; 83 post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
98 ret = radeon_combios_get_clock_info(dev); 106 ret = radeon_combios_get_clock_info(dev);
99 107
100 if (ret) { 108 if (ret) {
101 if (p1pll->reference_div < 2) 109 if (p1pll->reference_div < 2) {
102 p1pll->reference_div = 12; 110 if (!ASIC_IS_AVIVO(rdev)) {
111 u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
112 if (ASIC_IS_R300(rdev))
113 p1pll->reference_div =
114 (tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
115 else
116 p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
117 if (p1pll->reference_div < 2)
118 p1pll->reference_div = 12;
119 } else
120 p1pll->reference_div = 12;
121 }
103 if (p2pll->reference_div < 2) 122 if (p2pll->reference_div < 2)
104 p2pll->reference_div = 12; 123 p2pll->reference_div = 12;
105 if (rdev->family < CHIP_RS600) { 124 if (rdev->family < CHIP_RS600) {
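
[editor's note] Both readback paths above gain the same guard: the clock is derived as fb_div / ref_div, and a zero reference divider (an unprogrammed register) would otherwise trigger a kernel divide-by-zero. The computation, reduced to a sketch; the post-divider handling is simplified here, since the real code decodes it per register:

    static u32 pll_readback_khz(u32 fb_div, u32 ref_div, u32 post_div_shift)
    {
            if (ref_div == 0)
                    return 0;       /* treat an unprogrammed PLL as 0 kHz */
            return (fb_div / ref_div) >> post_div_shift;
    }
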
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5253cbf6db1f..c5021a3445de 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -50,7 +50,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
50 uint32_t supported_device, 50 uint32_t supported_device,
51 int connector_type, 51 int connector_type,
52 struct radeon_i2c_bus_rec *i2c_bus, 52 struct radeon_i2c_bus_rec *i2c_bus,
53 uint16_t connector_object_id); 53 uint16_t connector_object_id,
54 struct radeon_hpd *hpd);
54 55
55/* from radeon_legacy_encoder.c */ 56/* from radeon_legacy_encoder.c */
56extern void 57extern void
@@ -442,38 +443,70 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
442 443
443} 444}
444 445
445struct radeon_i2c_bus_rec combios_setup_i2c_bus(int ddc_line) 446static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
447 int ddc_line)
446{ 448{
447 struct radeon_i2c_bus_rec i2c; 449 struct radeon_i2c_bus_rec i2c;
448 450
449 i2c.mask_clk_mask = RADEON_GPIO_EN_1; 451 if (ddc_line == RADEON_GPIOPAD_MASK) {
450 i2c.mask_data_mask = RADEON_GPIO_EN_0; 452 i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
451 i2c.a_clk_mask = RADEON_GPIO_A_1; 453 i2c.mask_data_reg = RADEON_GPIOPAD_MASK;
452 i2c.a_data_mask = RADEON_GPIO_A_0; 454 i2c.a_clk_reg = RADEON_GPIOPAD_A;
453 i2c.put_clk_mask = RADEON_GPIO_EN_1; 455 i2c.a_data_reg = RADEON_GPIOPAD_A;
454 i2c.put_data_mask = RADEON_GPIO_EN_0; 456 i2c.en_clk_reg = RADEON_GPIOPAD_EN;
455 i2c.get_clk_mask = RADEON_GPIO_Y_1; 457 i2c.en_data_reg = RADEON_GPIOPAD_EN;
456 i2c.get_data_mask = RADEON_GPIO_Y_0; 458 i2c.y_clk_reg = RADEON_GPIOPAD_Y;
457 if ((ddc_line == RADEON_LCD_GPIO_MASK) || 459 i2c.y_data_reg = RADEON_GPIOPAD_Y;
458 (ddc_line == RADEON_MDGPIO_EN_REG)) { 460 } else if (ddc_line == RADEON_MDGPIO_MASK) {
459 i2c.mask_clk_reg = ddc_line; 461 i2c.mask_clk_reg = RADEON_MDGPIO_MASK;
460 i2c.mask_data_reg = ddc_line; 462 i2c.mask_data_reg = RADEON_MDGPIO_MASK;
461 i2c.a_clk_reg = ddc_line; 463 i2c.a_clk_reg = RADEON_MDGPIO_A;
462 i2c.a_data_reg = ddc_line; 464 i2c.a_data_reg = RADEON_MDGPIO_A;
463 i2c.put_clk_reg = ddc_line; 465 i2c.en_clk_reg = RADEON_MDGPIO_EN;
464 i2c.put_data_reg = ddc_line; 466 i2c.en_data_reg = RADEON_MDGPIO_EN;
465 i2c.get_clk_reg = ddc_line + 4; 467 i2c.y_clk_reg = RADEON_MDGPIO_Y;
466 i2c.get_data_reg = ddc_line + 4; 468 i2c.y_data_reg = RADEON_MDGPIO_Y;
467 } else { 469 } else {
470 i2c.mask_clk_mask = RADEON_GPIO_EN_1;
471 i2c.mask_data_mask = RADEON_GPIO_EN_0;
472 i2c.a_clk_mask = RADEON_GPIO_A_1;
473 i2c.a_data_mask = RADEON_GPIO_A_0;
474 i2c.en_clk_mask = RADEON_GPIO_EN_1;
475 i2c.en_data_mask = RADEON_GPIO_EN_0;
476 i2c.y_clk_mask = RADEON_GPIO_Y_1;
477 i2c.y_data_mask = RADEON_GPIO_Y_0;
478
468 i2c.mask_clk_reg = ddc_line; 479 i2c.mask_clk_reg = ddc_line;
469 i2c.mask_data_reg = ddc_line; 480 i2c.mask_data_reg = ddc_line;
470 i2c.a_clk_reg = ddc_line; 481 i2c.a_clk_reg = ddc_line;
471 i2c.a_data_reg = ddc_line; 482 i2c.a_data_reg = ddc_line;
472 i2c.put_clk_reg = ddc_line; 483 i2c.en_clk_reg = ddc_line;
473 i2c.put_data_reg = ddc_line; 484 i2c.en_data_reg = ddc_line;
474 i2c.get_clk_reg = ddc_line; 485 i2c.y_clk_reg = ddc_line;
475 i2c.get_data_reg = ddc_line; 486 i2c.y_data_reg = ddc_line;
487 }
488
489 if (rdev->family < CHIP_R200)
490 i2c.hw_capable = false;
491 else {
492 switch (ddc_line) {
493 case RADEON_GPIO_VGA_DDC:
494 case RADEON_GPIO_DVI_DDC:
495 i2c.hw_capable = true;
496 break;
497 case RADEON_GPIO_MONID:
498 /* hw i2c on RADEON_GPIO_MONID doesn't seem to work
499 * reliably on some pre-r4xx hardware; not sure why.
500 */
501 i2c.hw_capable = false;
502 break;
503 default:
504 i2c.hw_capable = false;
505 break;
506 }
476 } 507 }
508 i2c.mm_i2c = false;
509 i2c.i2c_id = 0;
477 510
478 if (ddc_line) 511 if (ddc_line)
479 i2c.valid = true; 512 i2c.valid = true;
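
[editor's note] combios_setup_i2c_bus() now takes rdev so it can mark a bus hw_capable per chip family and per DDC line, alongside the dedicated register layouts it gained for the GPIOPAD and MDGPIO pads. A hedged call-site sketch (the branch on hw_capable is illustrative; field names are from the hunk):

    struct radeon_i2c_bus_rec rec;

    rec = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
    if (rec.valid && rec.hw_capable) {
            /* R200 and newer may drive this line with the hw i2c engine */
    } else {
            /* fall back to bit-banging via the mask/a/en/y registers */
    }
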
@@ -495,7 +528,7 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
495 uint16_t sclk, mclk; 528 uint16_t sclk, mclk;
496 529
497 if (rdev->bios == NULL) 530 if (rdev->bios == NULL)
498 return NULL; 531 return false;
499 532
500 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE); 533 pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
501 if (pll_info) { 534 if (pll_info) {
@@ -993,8 +1026,8 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = {
993 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */ 1026 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R420 */
994 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */ 1027 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_R423 */
995 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */ 1028 {{0xffffffff, 0xb01cb}, {0, 0}, {0, 0}, {0, 0}}, /* CHIP_RV410 */
996 {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS400 */ 1029 { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS400 */
997 {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */ 1030 { {0, 0}, {0, 0}, {0, 0}, {0, 0} }, /* CHIP_RS480 */
998}; 1031};
999 1032
1000bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, 1033bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
@@ -1028,7 +1061,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
1028 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); 1061 tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
1029 1062
1030 if (tmds_info) { 1063 if (tmds_info) {
1031
1032 ver = RBIOS8(tmds_info); 1064 ver = RBIOS8(tmds_info);
1033 DRM_INFO("DFP table revision: %d\n", ver); 1065 DRM_INFO("DFP table revision: %d\n", ver);
1034 if (ver == 3) { 1066 if (ver == 3) {
@@ -1063,51 +1095,139 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
1063 tmds->tmds_pll[i].value); 1095 tmds->tmds_pll[i].value);
1064 } 1096 }
1065 } 1097 }
1066 } else 1098 } else {
1067 DRM_INFO("No TMDS info found in BIOS\n"); 1099 DRM_INFO("No TMDS info found in BIOS\n");
1100 return false;
1101 }
1068 return true; 1102 return true;
1069} 1103}
1070 1104
1071struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder) 1105bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
1106 struct radeon_encoder_ext_tmds *tmds)
1072{ 1107{
1073 struct radeon_encoder_int_tmds *tmds = NULL; 1108 struct drm_device *dev = encoder->base.dev;
1074 bool ret; 1109 struct radeon_device *rdev = dev->dev_private;
1110 struct radeon_i2c_bus_rec i2c_bus;
1075 1111
1076 tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); 1112 /* default for macs */
1113 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1114 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1077 1115
1078 if (!tmds) 1116 /* XXX some macs have duallink chips */
1079 return NULL; 1117 switch (rdev->mode_info.connector_table) {
1080 1118 case CT_POWERBOOK_EXTERNAL:
1081 ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); 1119 case CT_MINI_EXTERNAL:
1082 if (ret == false) 1120 default:
1083 radeon_legacy_get_tmds_info_from_table(encoder, tmds); 1121 tmds->dvo_chip = DVO_SIL164;
1122 tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
1123 break;
1124 }
1084 1125
1085 return tmds; 1126 return true;
1086} 1127}
1087 1128
1088void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder) 1129bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
1130 struct radeon_encoder_ext_tmds *tmds)
1089{ 1131{
1090 struct drm_device *dev = encoder->base.dev; 1132 struct drm_device *dev = encoder->base.dev;
1091 struct radeon_device *rdev = dev->dev_private; 1133 struct radeon_device *rdev = dev->dev_private;
1092 uint16_t ext_tmds_info; 1134 uint16_t offset;
1093 uint8_t ver; 1135 uint8_t ver, id, blocks, clk, data;
1136 int i;
1137 enum radeon_combios_ddc gpio;
1138 struct radeon_i2c_bus_rec i2c_bus;
1094 1139
1095 if (rdev->bios == NULL) 1140 if (rdev->bios == NULL)
1096 return; 1141 return false;
1097 1142
1098 ext_tmds_info = 1143 tmds->i2c_bus = NULL;
1099 combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE); 1144 if (rdev->flags & RADEON_IS_IGP) {
1100 if (ext_tmds_info) { 1145 offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
1101 ver = RBIOS8(ext_tmds_info); 1146 if (offset) {
1102 DRM_INFO("External TMDS Table revision: %d\n", ver); 1147 ver = RBIOS8(offset);
1103 // TODO 1148 DRM_INFO("GPIO Table revision: %d\n", ver);
1149 blocks = RBIOS8(offset + 2);
1150 for (i = 0; i < blocks; i++) {
1151 id = RBIOS8(offset + 3 + (i * 5) + 0);
1152 if (id == 136) {
1153 clk = RBIOS8(offset + 3 + (i * 5) + 3);
1154 data = RBIOS8(offset + 3 + (i * 5) + 4);
1155 i2c_bus.valid = true;
1156 i2c_bus.mask_clk_mask = (1 << clk);
1157 i2c_bus.mask_data_mask = (1 << data);
1158 i2c_bus.a_clk_mask = (1 << clk);
1159 i2c_bus.a_data_mask = (1 << data);
1160 i2c_bus.en_clk_mask = (1 << clk);
1161 i2c_bus.en_data_mask = (1 << data);
1162 i2c_bus.y_clk_mask = (1 << clk);
1163 i2c_bus.y_data_mask = (1 << data);
1164 i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
1165 i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
1166 i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
1167 i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
1168 i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
1169 i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
1170 i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
1171 i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
1172 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1173 tmds->dvo_chip = DVO_SIL164;
1174 tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
1175 break;
1176 }
1177 }
1178 }
1179 } else {
1180 offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
1181 if (offset) {
1182 ver = RBIOS8(offset);
1183 DRM_INFO("External TMDS Table revision: %d\n", ver);
1184 tmds->slave_addr = RBIOS8(offset + 4 + 2);
1185 tmds->slave_addr >>= 1; /* 7 bit addressing */
1186 gpio = RBIOS8(offset + 4 + 3);
1187 switch (gpio) {
1188 case DDC_MONID:
1189 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1190 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1191 break;
1192 case DDC_DVI:
1193 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1194 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1195 break;
1196 case DDC_VGA:
1197 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1198 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1199 break;
1200 case DDC_CRT2:
1201 /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
1202 if (rdev->family >= CHIP_R300)
1203 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1204 else
1205 i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1206 tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
1207 break;
1208 case DDC_LCD: /* MM i2c */
1209 DRM_ERROR("MM i2c requires hw i2c engine\n");
1210 break;
1211 default:
1212 DRM_ERROR("Unsupported gpio %d\n", gpio);
1213 break;
1214 }
1215 }
1104 } 1216 }
1217
1218 if (!tmds->i2c_bus) {
1219 DRM_INFO("No valid Ext TMDS info found in BIOS\n");
1220 return false;
1221 }
1222
1223 return true;
1105} 1224}
1106 1225
1107bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev) 1226bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1108{ 1227{
1109 struct radeon_device *rdev = dev->dev_private; 1228 struct radeon_device *rdev = dev->dev_private;
1110 struct radeon_i2c_bus_rec ddc_i2c; 1229 struct radeon_i2c_bus_rec ddc_i2c;
1230 struct radeon_hpd hpd;
1111 1231
1112 rdev->mode_info.connector_table = radeon_connector_table; 1232 rdev->mode_info.connector_table = radeon_connector_table;
1113 if (rdev->mode_info.connector_table == CT_NONE) { 1233 if (rdev->mode_info.connector_table == CT_NONE) {
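
[editor's note] On IGP parts the DVO i2c bus comes from the COMBIOS i2c/GPIO table walked in the IGP branch above: each 5-byte record starts with an id byte, and id 136 carries the GPIOPAD clock and data bit positions in its last two bytes. A sketch of that record layout, with offsets inferred from the hunk rather than from a published spec:

    /* One COMBIOS GPIO record, as read above. */
    u8 id   = RBIOS8(offset + 3 + (i * 5) + 0);   /* 136 == DVO bus        */
    u8 clk  = RBIOS8(offset + 3 + (i * 5) + 3);   /* GPIOPAD clock bit no. */
    u8 data = RBIOS8(offset + 3 + (i * 5) + 4);   /* GPIOPAD data bit no.  */

    u32 clk_mask  = 1 << clk;                     /* feeds the *_clk_mask fields  */
    u32 data_mask = 1 << data;                    /* feeds the *_data_mask fields */
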
@@ -1168,7 +1288,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1168 /* these are the most common settings */ 1288 /* these are the most common settings */
1169 if (rdev->flags & RADEON_SINGLE_CRTC) { 1289 if (rdev->flags & RADEON_SINGLE_CRTC) {
1170 /* VGA - primary dac */ 1290 /* VGA - primary dac */
1171 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1291 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1292 hpd.hpd = RADEON_HPD_NONE;
1172 radeon_add_legacy_encoder(dev, 1293 radeon_add_legacy_encoder(dev,
1173 radeon_get_encoder_id(dev, 1294 radeon_get_encoder_id(dev,
1174 ATOM_DEVICE_CRT1_SUPPORT, 1295 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1178,10 +1299,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1178 ATOM_DEVICE_CRT1_SUPPORT, 1299 ATOM_DEVICE_CRT1_SUPPORT,
1179 DRM_MODE_CONNECTOR_VGA, 1300 DRM_MODE_CONNECTOR_VGA,
1180 &ddc_i2c, 1301 &ddc_i2c,
1181 CONNECTOR_OBJECT_ID_VGA); 1302 CONNECTOR_OBJECT_ID_VGA,
1303 &hpd);
1182 } else if (rdev->flags & RADEON_IS_MOBILITY) { 1304 } else if (rdev->flags & RADEON_IS_MOBILITY) {
1183 /* LVDS */ 1305 /* LVDS */
1184 ddc_i2c = combios_setup_i2c_bus(RADEON_LCD_GPIO_MASK); 1306 ddc_i2c = combios_setup_i2c_bus(rdev, 0);
1307 hpd.hpd = RADEON_HPD_NONE;
1185 radeon_add_legacy_encoder(dev, 1308 radeon_add_legacy_encoder(dev,
1186 radeon_get_encoder_id(dev, 1309 radeon_get_encoder_id(dev,
1187 ATOM_DEVICE_LCD1_SUPPORT, 1310 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1191,10 +1314,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1191 ATOM_DEVICE_LCD1_SUPPORT, 1314 ATOM_DEVICE_LCD1_SUPPORT,
1192 DRM_MODE_CONNECTOR_LVDS, 1315 DRM_MODE_CONNECTOR_LVDS,
1193 &ddc_i2c, 1316 &ddc_i2c,
1194 CONNECTOR_OBJECT_ID_LVDS); 1317 CONNECTOR_OBJECT_ID_LVDS,
1318 &hpd);
1195 1319
1196 /* VGA - primary dac */ 1320 /* VGA - primary dac */
1197 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1321 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1322 hpd.hpd = RADEON_HPD_NONE;
1198 radeon_add_legacy_encoder(dev, 1323 radeon_add_legacy_encoder(dev,
1199 radeon_get_encoder_id(dev, 1324 radeon_get_encoder_id(dev,
1200 ATOM_DEVICE_CRT1_SUPPORT, 1325 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1204,10 +1329,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1204 ATOM_DEVICE_CRT1_SUPPORT, 1329 ATOM_DEVICE_CRT1_SUPPORT,
1205 DRM_MODE_CONNECTOR_VGA, 1330 DRM_MODE_CONNECTOR_VGA,
1206 &ddc_i2c, 1331 &ddc_i2c,
1207 CONNECTOR_OBJECT_ID_VGA); 1332 CONNECTOR_OBJECT_ID_VGA,
1333 &hpd);
1208 } else { 1334 } else {
1209 /* DVI-I - tv dac, int tmds */ 1335 /* DVI-I - tv dac, int tmds */
1210 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1336 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1337 hpd.hpd = RADEON_HPD_1;
1211 radeon_add_legacy_encoder(dev, 1338 radeon_add_legacy_encoder(dev,
1212 radeon_get_encoder_id(dev, 1339 radeon_get_encoder_id(dev,
1213 ATOM_DEVICE_DFP1_SUPPORT, 1340 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1223,10 +1350,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1223 ATOM_DEVICE_CRT2_SUPPORT, 1350 ATOM_DEVICE_CRT2_SUPPORT,
1224 DRM_MODE_CONNECTOR_DVII, 1351 DRM_MODE_CONNECTOR_DVII,
1225 &ddc_i2c, 1352 &ddc_i2c,
1226 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1353 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1354 &hpd);
1227 1355
1228 /* VGA - primary dac */ 1356 /* VGA - primary dac */
1229 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1357 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1358 hpd.hpd = RADEON_HPD_NONE;
1230 radeon_add_legacy_encoder(dev, 1359 radeon_add_legacy_encoder(dev,
1231 radeon_get_encoder_id(dev, 1360 radeon_get_encoder_id(dev,
1232 ATOM_DEVICE_CRT1_SUPPORT, 1361 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1236,11 +1365,14 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1236 ATOM_DEVICE_CRT1_SUPPORT, 1365 ATOM_DEVICE_CRT1_SUPPORT,
1237 DRM_MODE_CONNECTOR_VGA, 1366 DRM_MODE_CONNECTOR_VGA,
1238 &ddc_i2c, 1367 &ddc_i2c,
1239 CONNECTOR_OBJECT_ID_VGA); 1368 CONNECTOR_OBJECT_ID_VGA,
1369 &hpd);
1240 } 1370 }
1241 1371
1242 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) { 1372 if (rdev->family != CHIP_R100 && rdev->family != CHIP_R200) {
1243 /* TV - tv dac */ 1373 /* TV - tv dac */
1374 ddc_i2c.valid = false;
1375 hpd.hpd = RADEON_HPD_NONE;
1244 radeon_add_legacy_encoder(dev, 1376 radeon_add_legacy_encoder(dev,
1245 radeon_get_encoder_id(dev, 1377 radeon_get_encoder_id(dev,
1246 ATOM_DEVICE_TV1_SUPPORT, 1378 ATOM_DEVICE_TV1_SUPPORT,
@@ -1250,14 +1382,16 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1250 ATOM_DEVICE_TV1_SUPPORT, 1382 ATOM_DEVICE_TV1_SUPPORT,
1251 DRM_MODE_CONNECTOR_SVIDEO, 1383 DRM_MODE_CONNECTOR_SVIDEO,
1252 &ddc_i2c, 1384 &ddc_i2c,
1253 CONNECTOR_OBJECT_ID_SVIDEO); 1385 CONNECTOR_OBJECT_ID_SVIDEO,
1386 &hpd);
1254 } 1387 }
1255 break; 1388 break;
1256 case CT_IBOOK: 1389 case CT_IBOOK:
1257 DRM_INFO("Connector Table: %d (ibook)\n", 1390 DRM_INFO("Connector Table: %d (ibook)\n",
1258 rdev->mode_info.connector_table); 1391 rdev->mode_info.connector_table);
1259 /* LVDS */ 1392 /* LVDS */
1260 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1393 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1394 hpd.hpd = RADEON_HPD_NONE;
1261 radeon_add_legacy_encoder(dev, 1395 radeon_add_legacy_encoder(dev,
1262 radeon_get_encoder_id(dev, 1396 radeon_get_encoder_id(dev,
1263 ATOM_DEVICE_LCD1_SUPPORT, 1397 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1265,9 +1399,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1265 ATOM_DEVICE_LCD1_SUPPORT); 1399 ATOM_DEVICE_LCD1_SUPPORT);
1266 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1400 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1267 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1401 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1268 CONNECTOR_OBJECT_ID_LVDS); 1402 CONNECTOR_OBJECT_ID_LVDS,
1403 &hpd);
1269 /* VGA - TV DAC */ 1404 /* VGA - TV DAC */
1270 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1405 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1406 hpd.hpd = RADEON_HPD_NONE;
1271 radeon_add_legacy_encoder(dev, 1407 radeon_add_legacy_encoder(dev,
1272 radeon_get_encoder_id(dev, 1408 radeon_get_encoder_id(dev,
1273 ATOM_DEVICE_CRT2_SUPPORT, 1409 ATOM_DEVICE_CRT2_SUPPORT,
@@ -1275,8 +1411,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1275 ATOM_DEVICE_CRT2_SUPPORT); 1411 ATOM_DEVICE_CRT2_SUPPORT);
1276 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1412 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1277 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1413 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1278 CONNECTOR_OBJECT_ID_VGA); 1414 CONNECTOR_OBJECT_ID_VGA,
1415 &hpd);
1279 /* TV - TV DAC */ 1416 /* TV - TV DAC */
1417 ddc_i2c.valid = false;
1418 hpd.hpd = RADEON_HPD_NONE;
1280 radeon_add_legacy_encoder(dev, 1419 radeon_add_legacy_encoder(dev,
1281 radeon_get_encoder_id(dev, 1420 radeon_get_encoder_id(dev,
1282 ATOM_DEVICE_TV1_SUPPORT, 1421 ATOM_DEVICE_TV1_SUPPORT,
@@ -1285,13 +1424,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1285 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1424 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1286 DRM_MODE_CONNECTOR_SVIDEO, 1425 DRM_MODE_CONNECTOR_SVIDEO,
1287 &ddc_i2c, 1426 &ddc_i2c,
1288 CONNECTOR_OBJECT_ID_SVIDEO); 1427 CONNECTOR_OBJECT_ID_SVIDEO,
1428 &hpd);
1289 break; 1429 break;
1290 case CT_POWERBOOK_EXTERNAL: 1430 case CT_POWERBOOK_EXTERNAL:
1291 DRM_INFO("Connector Table: %d (powerbook external tmds)\n", 1431 DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
1292 rdev->mode_info.connector_table); 1432 rdev->mode_info.connector_table);
1293 /* LVDS */ 1433 /* LVDS */
1294 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1434 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1435 hpd.hpd = RADEON_HPD_NONE;
1295 radeon_add_legacy_encoder(dev, 1436 radeon_add_legacy_encoder(dev,
1296 radeon_get_encoder_id(dev, 1437 radeon_get_encoder_id(dev,
1297 ATOM_DEVICE_LCD1_SUPPORT, 1438 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1299,9 +1440,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1299 ATOM_DEVICE_LCD1_SUPPORT); 1440 ATOM_DEVICE_LCD1_SUPPORT);
1300 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1441 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1301 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1442 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1302 CONNECTOR_OBJECT_ID_LVDS); 1443 CONNECTOR_OBJECT_ID_LVDS,
1444 &hpd);
1303 /* DVI-I - primary dac, ext tmds */ 1445 /* DVI-I - primary dac, ext tmds */
1304 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1446 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1447 hpd.hpd = RADEON_HPD_2; /* ??? */
1305 radeon_add_legacy_encoder(dev, 1448 radeon_add_legacy_encoder(dev,
1306 radeon_get_encoder_id(dev, 1449 radeon_get_encoder_id(dev,
1307 ATOM_DEVICE_DFP2_SUPPORT, 1450 ATOM_DEVICE_DFP2_SUPPORT,
@@ -1317,8 +1460,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1317 ATOM_DEVICE_DFP2_SUPPORT | 1460 ATOM_DEVICE_DFP2_SUPPORT |
1318 ATOM_DEVICE_CRT1_SUPPORT, 1461 ATOM_DEVICE_CRT1_SUPPORT,
1319 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1462 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1320 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I); 1463 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
1464 &hpd);
1321 /* TV - TV DAC */ 1465 /* TV - TV DAC */
1466 ddc_i2c.valid = false;
1467 hpd.hpd = RADEON_HPD_NONE;
1322 radeon_add_legacy_encoder(dev, 1468 radeon_add_legacy_encoder(dev,
1323 radeon_get_encoder_id(dev, 1469 radeon_get_encoder_id(dev,
1324 ATOM_DEVICE_TV1_SUPPORT, 1470 ATOM_DEVICE_TV1_SUPPORT,
@@ -1327,13 +1473,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1327 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1473 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1328 DRM_MODE_CONNECTOR_SVIDEO, 1474 DRM_MODE_CONNECTOR_SVIDEO,
1329 &ddc_i2c, 1475 &ddc_i2c,
1330 CONNECTOR_OBJECT_ID_SVIDEO); 1476 CONNECTOR_OBJECT_ID_SVIDEO,
1477 &hpd);
1331 break; 1478 break;
1332 case CT_POWERBOOK_INTERNAL: 1479 case CT_POWERBOOK_INTERNAL:
1333 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n", 1480 DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
1334 rdev->mode_info.connector_table); 1481 rdev->mode_info.connector_table);
1335 /* LVDS */ 1482 /* LVDS */
1336 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1483 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1484 hpd.hpd = RADEON_HPD_NONE;
1337 radeon_add_legacy_encoder(dev, 1485 radeon_add_legacy_encoder(dev,
1338 radeon_get_encoder_id(dev, 1486 radeon_get_encoder_id(dev,
1339 ATOM_DEVICE_LCD1_SUPPORT, 1487 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1341,9 +1489,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1341 ATOM_DEVICE_LCD1_SUPPORT); 1489 ATOM_DEVICE_LCD1_SUPPORT);
1342 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1490 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1343 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1491 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1344 CONNECTOR_OBJECT_ID_LVDS); 1492 CONNECTOR_OBJECT_ID_LVDS,
1493 &hpd);
1345 /* DVI-I - primary dac, int tmds */ 1494 /* DVI-I - primary dac, int tmds */
1346 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1495 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1496 hpd.hpd = RADEON_HPD_1; /* ??? */
1347 radeon_add_legacy_encoder(dev, 1497 radeon_add_legacy_encoder(dev,
1348 radeon_get_encoder_id(dev, 1498 radeon_get_encoder_id(dev,
1349 ATOM_DEVICE_DFP1_SUPPORT, 1499 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1358,8 +1508,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1358 ATOM_DEVICE_DFP1_SUPPORT | 1508 ATOM_DEVICE_DFP1_SUPPORT |
1359 ATOM_DEVICE_CRT1_SUPPORT, 1509 ATOM_DEVICE_CRT1_SUPPORT,
1360 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1510 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1361 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1511 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1512 &hpd);
1362 /* TV - TV DAC */ 1513 /* TV - TV DAC */
1514 ddc_i2c.valid = false;
1515 hpd.hpd = RADEON_HPD_NONE;
1363 radeon_add_legacy_encoder(dev, 1516 radeon_add_legacy_encoder(dev,
1364 radeon_get_encoder_id(dev, 1517 radeon_get_encoder_id(dev,
1365 ATOM_DEVICE_TV1_SUPPORT, 1518 ATOM_DEVICE_TV1_SUPPORT,
@@ -1368,13 +1521,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1368 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1521 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1369 DRM_MODE_CONNECTOR_SVIDEO, 1522 DRM_MODE_CONNECTOR_SVIDEO,
1370 &ddc_i2c, 1523 &ddc_i2c,
1371 CONNECTOR_OBJECT_ID_SVIDEO); 1524 CONNECTOR_OBJECT_ID_SVIDEO,
1525 &hpd);
1372 break; 1526 break;
1373 case CT_POWERBOOK_VGA: 1527 case CT_POWERBOOK_VGA:
1374 DRM_INFO("Connector Table: %d (powerbook vga)\n", 1528 DRM_INFO("Connector Table: %d (powerbook vga)\n",
1375 rdev->mode_info.connector_table); 1529 rdev->mode_info.connector_table);
1376 /* LVDS */ 1530 /* LVDS */
1377 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1531 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1532 hpd.hpd = RADEON_HPD_NONE;
1378 radeon_add_legacy_encoder(dev, 1533 radeon_add_legacy_encoder(dev,
1379 radeon_get_encoder_id(dev, 1534 radeon_get_encoder_id(dev,
1380 ATOM_DEVICE_LCD1_SUPPORT, 1535 ATOM_DEVICE_LCD1_SUPPORT,
@@ -1382,9 +1537,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1382 ATOM_DEVICE_LCD1_SUPPORT); 1537 ATOM_DEVICE_LCD1_SUPPORT);
1383 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT, 1538 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
1384 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c, 1539 DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
1385 CONNECTOR_OBJECT_ID_LVDS); 1540 CONNECTOR_OBJECT_ID_LVDS,
1541 &hpd);
1386 /* VGA - primary dac */ 1542 /* VGA - primary dac */
1387 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1543 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1544 hpd.hpd = RADEON_HPD_NONE;
1388 radeon_add_legacy_encoder(dev, 1545 radeon_add_legacy_encoder(dev,
1389 radeon_get_encoder_id(dev, 1546 radeon_get_encoder_id(dev,
1390 ATOM_DEVICE_CRT1_SUPPORT, 1547 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1392,8 +1549,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1392 ATOM_DEVICE_CRT1_SUPPORT); 1549 ATOM_DEVICE_CRT1_SUPPORT);
1393 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT, 1550 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT1_SUPPORT,
1394 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1551 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1395 CONNECTOR_OBJECT_ID_VGA); 1552 CONNECTOR_OBJECT_ID_VGA,
1553 &hpd);
1396 /* TV - TV DAC */ 1554 /* TV - TV DAC */
1555 ddc_i2c.valid = false;
1556 hpd.hpd = RADEON_HPD_NONE;
1397 radeon_add_legacy_encoder(dev, 1557 radeon_add_legacy_encoder(dev,
1398 radeon_get_encoder_id(dev, 1558 radeon_get_encoder_id(dev,
1399 ATOM_DEVICE_TV1_SUPPORT, 1559 ATOM_DEVICE_TV1_SUPPORT,
@@ -1402,13 +1562,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1402 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1562 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1403 DRM_MODE_CONNECTOR_SVIDEO, 1563 DRM_MODE_CONNECTOR_SVIDEO,
1404 &ddc_i2c, 1564 &ddc_i2c,
1405 CONNECTOR_OBJECT_ID_SVIDEO); 1565 CONNECTOR_OBJECT_ID_SVIDEO,
1566 &hpd);
1406 break; 1567 break;
1407 case CT_MINI_EXTERNAL: 1568 case CT_MINI_EXTERNAL:
1408 DRM_INFO("Connector Table: %d (mini external tmds)\n", 1569 DRM_INFO("Connector Table: %d (mini external tmds)\n",
1409 rdev->mode_info.connector_table); 1570 rdev->mode_info.connector_table);
1410 /* DVI-I - tv dac, ext tmds */ 1571 /* DVI-I - tv dac, ext tmds */
1411 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1572 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1573 hpd.hpd = RADEON_HPD_2; /* ??? */
1412 radeon_add_legacy_encoder(dev, 1574 radeon_add_legacy_encoder(dev,
1413 radeon_get_encoder_id(dev, 1575 radeon_get_encoder_id(dev,
1414 ATOM_DEVICE_DFP2_SUPPORT, 1576 ATOM_DEVICE_DFP2_SUPPORT,
@@ -1424,8 +1586,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1424 ATOM_DEVICE_DFP2_SUPPORT | 1586 ATOM_DEVICE_DFP2_SUPPORT |
1425 ATOM_DEVICE_CRT2_SUPPORT, 1587 ATOM_DEVICE_CRT2_SUPPORT,
1426 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1588 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1427 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1589 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1590 &hpd);
1428 /* TV - TV DAC */ 1591 /* TV - TV DAC */
1592 ddc_i2c.valid = false;
1593 hpd.hpd = RADEON_HPD_NONE;
1429 radeon_add_legacy_encoder(dev, 1594 radeon_add_legacy_encoder(dev,
1430 radeon_get_encoder_id(dev, 1595 radeon_get_encoder_id(dev,
1431 ATOM_DEVICE_TV1_SUPPORT, 1596 ATOM_DEVICE_TV1_SUPPORT,
@@ -1434,13 +1599,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1434 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, 1599 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1435 DRM_MODE_CONNECTOR_SVIDEO, 1600 DRM_MODE_CONNECTOR_SVIDEO,
1436 &ddc_i2c, 1601 &ddc_i2c,
1437 CONNECTOR_OBJECT_ID_SVIDEO); 1602 CONNECTOR_OBJECT_ID_SVIDEO,
1603 &hpd);
1438 break; 1604 break;
1439 case CT_MINI_INTERNAL: 1605 case CT_MINI_INTERNAL:
1440 DRM_INFO("Connector Table: %d (mini internal tmds)\n", 1606 DRM_INFO("Connector Table: %d (mini internal tmds)\n",
1441 rdev->mode_info.connector_table); 1607 rdev->mode_info.connector_table);
1442 /* DVI-I - tv dac, int tmds */ 1608 /* DVI-I - tv dac, int tmds */
1443 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1609 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1610 hpd.hpd = RADEON_HPD_1; /* ??? */
1444 radeon_add_legacy_encoder(dev, 1611 radeon_add_legacy_encoder(dev,
1445 radeon_get_encoder_id(dev, 1612 radeon_get_encoder_id(dev,
1446 ATOM_DEVICE_DFP1_SUPPORT, 1613 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1455,8 +1622,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1455 ATOM_DEVICE_DFP1_SUPPORT | 1622 ATOM_DEVICE_DFP1_SUPPORT |
1456 ATOM_DEVICE_CRT2_SUPPORT, 1623 ATOM_DEVICE_CRT2_SUPPORT,
1457 DRM_MODE_CONNECTOR_DVII, &ddc_i2c, 1624 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
1458 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 1625 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
1626 &hpd);
1459 /* TV - TV DAC */ 1627 /* TV - TV DAC */
1628 ddc_i2c.valid = false;
1629 hpd.hpd = RADEON_HPD_NONE;
1460 radeon_add_legacy_encoder(dev, 1630 radeon_add_legacy_encoder(dev,
1461 radeon_get_encoder_id(dev, 1631 radeon_get_encoder_id(dev,
1462 ATOM_DEVICE_TV1_SUPPORT, 1632 ATOM_DEVICE_TV1_SUPPORT,
@@ -1465,13 +1635,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1465 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT, 1635 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_TV1_SUPPORT,
1466 DRM_MODE_CONNECTOR_SVIDEO, 1636 DRM_MODE_CONNECTOR_SVIDEO,
1467 &ddc_i2c, 1637 &ddc_i2c,
1468 CONNECTOR_OBJECT_ID_SVIDEO); 1638 CONNECTOR_OBJECT_ID_SVIDEO,
1639 &hpd);
1469 break; 1640 break;
1470 case CT_IMAC_G5_ISIGHT: 1641 case CT_IMAC_G5_ISIGHT:
1471 DRM_INFO("Connector Table: %d (imac g5 isight)\n", 1642 DRM_INFO("Connector Table: %d (imac g5 isight)\n",
1472 rdev->mode_info.connector_table); 1643 rdev->mode_info.connector_table);
1473 /* DVI-D - int tmds */ 1644 /* DVI-D - int tmds */
1474 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); 1645 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1646 hpd.hpd = RADEON_HPD_1; /* ??? */
1475 radeon_add_legacy_encoder(dev, 1647 radeon_add_legacy_encoder(dev,
1476 radeon_get_encoder_id(dev, 1648 radeon_get_encoder_id(dev,
1477 ATOM_DEVICE_DFP1_SUPPORT, 1649 ATOM_DEVICE_DFP1_SUPPORT,
@@ -1479,9 +1651,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1479 ATOM_DEVICE_DFP1_SUPPORT); 1651 ATOM_DEVICE_DFP1_SUPPORT);
1480 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT, 1652 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_DFP1_SUPPORT,
1481 DRM_MODE_CONNECTOR_DVID, &ddc_i2c, 1653 DRM_MODE_CONNECTOR_DVID, &ddc_i2c,
1482 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); 1654 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
1655 &hpd);
1483 /* VGA - tv dac */ 1656 /* VGA - tv dac */
1484 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1657 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1658 hpd.hpd = RADEON_HPD_NONE;
1485 radeon_add_legacy_encoder(dev, 1659 radeon_add_legacy_encoder(dev,
1486 radeon_get_encoder_id(dev, 1660 radeon_get_encoder_id(dev,
1487 ATOM_DEVICE_CRT2_SUPPORT, 1661 ATOM_DEVICE_CRT2_SUPPORT,
@@ -1489,8 +1663,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1489 ATOM_DEVICE_CRT2_SUPPORT); 1663 ATOM_DEVICE_CRT2_SUPPORT);
1490 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1664 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1491 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1665 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1492 CONNECTOR_OBJECT_ID_VGA); 1666 CONNECTOR_OBJECT_ID_VGA,
1667 &hpd);
1493 /* TV - TV DAC */ 1668 /* TV - TV DAC */
1669 ddc_i2c.valid = false;
1670 hpd.hpd = RADEON_HPD_NONE;
1494 radeon_add_legacy_encoder(dev, 1671 radeon_add_legacy_encoder(dev,
1495 radeon_get_encoder_id(dev, 1672 radeon_get_encoder_id(dev,
1496 ATOM_DEVICE_TV1_SUPPORT, 1673 ATOM_DEVICE_TV1_SUPPORT,
@@ -1499,13 +1676,15 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1499 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1676 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1500 DRM_MODE_CONNECTOR_SVIDEO, 1677 DRM_MODE_CONNECTOR_SVIDEO,
1501 &ddc_i2c, 1678 &ddc_i2c,
1502 CONNECTOR_OBJECT_ID_SVIDEO); 1679 CONNECTOR_OBJECT_ID_SVIDEO,
1680 &hpd);
1503 break; 1681 break;
1504 case CT_EMAC: 1682 case CT_EMAC:
1505 DRM_INFO("Connector Table: %d (emac)\n", 1683 DRM_INFO("Connector Table: %d (emac)\n",
1506 rdev->mode_info.connector_table); 1684 rdev->mode_info.connector_table);
1507 /* VGA - primary dac */ 1685 /* VGA - primary dac */
1508 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1686 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1687 hpd.hpd = RADEON_HPD_NONE;
1509 radeon_add_legacy_encoder(dev, 1688 radeon_add_legacy_encoder(dev,
1510 radeon_get_encoder_id(dev, 1689 radeon_get_encoder_id(dev,
1511 ATOM_DEVICE_CRT1_SUPPORT, 1690 ATOM_DEVICE_CRT1_SUPPORT,
@@ -1513,9 +1692,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1513 ATOM_DEVICE_CRT1_SUPPORT); 1692 ATOM_DEVICE_CRT1_SUPPORT);
1514 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT, 1693 radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_CRT1_SUPPORT,
1515 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1694 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1516 CONNECTOR_OBJECT_ID_VGA); 1695 CONNECTOR_OBJECT_ID_VGA,
1696 &hpd);
1517 /* VGA - tv dac */ 1697 /* VGA - tv dac */
1518 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1698 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1699 hpd.hpd = RADEON_HPD_NONE;
1519 radeon_add_legacy_encoder(dev, 1700 radeon_add_legacy_encoder(dev,
1520 radeon_get_encoder_id(dev, 1701 radeon_get_encoder_id(dev,
1521 ATOM_DEVICE_CRT2_SUPPORT, 1702 ATOM_DEVICE_CRT2_SUPPORT,
@@ -1523,8 +1704,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1523 ATOM_DEVICE_CRT2_SUPPORT); 1704 ATOM_DEVICE_CRT2_SUPPORT);
1524 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT, 1705 radeon_add_legacy_connector(dev, 1, ATOM_DEVICE_CRT2_SUPPORT,
1525 DRM_MODE_CONNECTOR_VGA, &ddc_i2c, 1706 DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
1526 CONNECTOR_OBJECT_ID_VGA); 1707 CONNECTOR_OBJECT_ID_VGA,
1708 &hpd);
1527 /* TV - TV DAC */ 1709 /* TV - TV DAC */
1710 ddc_i2c.valid = false;
1711 hpd.hpd = RADEON_HPD_NONE;
1528 radeon_add_legacy_encoder(dev, 1712 radeon_add_legacy_encoder(dev,
1529 radeon_get_encoder_id(dev, 1713 radeon_get_encoder_id(dev,
1530 ATOM_DEVICE_TV1_SUPPORT, 1714 ATOM_DEVICE_TV1_SUPPORT,
@@ -1533,7 +1717,8 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1533 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT, 1717 radeon_add_legacy_connector(dev, 2, ATOM_DEVICE_TV1_SUPPORT,
1534 DRM_MODE_CONNECTOR_SVIDEO, 1718 DRM_MODE_CONNECTOR_SVIDEO,
1535 &ddc_i2c, 1719 &ddc_i2c,
1536 CONNECTOR_OBJECT_ID_SVIDEO); 1720 CONNECTOR_OBJECT_ID_SVIDEO,
1721 &hpd);
1537 break; 1722 break;
1538 default: 1723 default:
1539 DRM_INFO("Connector table: %d (invalid)\n", 1724 DRM_INFO("Connector table: %d (invalid)\n",
@@ -1550,7 +1735,8 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1550 int bios_index, 1735 int bios_index,
1551 enum radeon_combios_connector 1736 enum radeon_combios_connector
1552 *legacy_connector, 1737 *legacy_connector,
1553 struct radeon_i2c_bus_rec *ddc_i2c) 1738 struct radeon_i2c_bus_rec *ddc_i2c,
1739 struct radeon_hpd *hpd)
1554{ 1740{
1555 struct radeon_device *rdev = dev->dev_private; 1741 struct radeon_device *rdev = dev->dev_private;
1556 1742
@@ -1558,29 +1744,26 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
1558 if ((rdev->family == CHIP_RS400 || 1744 if ((rdev->family == CHIP_RS400 ||
1559 rdev->family == CHIP_RS480) && 1745 rdev->family == CHIP_RS480) &&
1560 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) 1746 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1561 *ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_MONID); 1747 *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1562 else if ((rdev->family == CHIP_RS400 || 1748 else if ((rdev->family == CHIP_RS400 ||
1563 rdev->family == CHIP_RS480) && 1749 rdev->family == CHIP_RS480) &&
1564 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) { 1750 ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
1565 ddc_i2c->valid = true; 1751 *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
1566 ddc_i2c->mask_clk_mask = (0x20 << 8); 1752 ddc_i2c->mask_clk_mask = (0x20 << 8);
1567 ddc_i2c->mask_data_mask = 0x80; 1753 ddc_i2c->mask_data_mask = 0x80;
1568 ddc_i2c->a_clk_mask = (0x20 << 8); 1754 ddc_i2c->a_clk_mask = (0x20 << 8);
1569 ddc_i2c->a_data_mask = 0x80; 1755 ddc_i2c->a_data_mask = 0x80;
1570 ddc_i2c->put_clk_mask = (0x20 << 8); 1756 ddc_i2c->en_clk_mask = (0x20 << 8);
1571 ddc_i2c->put_data_mask = 0x80; 1757 ddc_i2c->en_data_mask = 0x80;
1572 ddc_i2c->get_clk_mask = (0x20 << 8); 1758 ddc_i2c->y_clk_mask = (0x20 << 8);
1573 ddc_i2c->get_data_mask = 0x80; 1759 ddc_i2c->y_data_mask = 0x80;
1574 ddc_i2c->mask_clk_reg = RADEON_GPIOPAD_MASK;
1575 ddc_i2c->mask_data_reg = RADEON_GPIOPAD_MASK;
1576 ddc_i2c->a_clk_reg = RADEON_GPIOPAD_A;
1577 ddc_i2c->a_data_reg = RADEON_GPIOPAD_A;
1578 ddc_i2c->put_clk_reg = RADEON_GPIOPAD_EN;
1579 ddc_i2c->put_data_reg = RADEON_GPIOPAD_EN;
1580 ddc_i2c->get_clk_reg = RADEON_LCD_GPIO_Y_REG;
1581 ddc_i2c->get_data_reg = RADEON_LCD_GPIO_Y_REG;
1582 } 1760 }
1583 1761
1762 /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
1763 if ((rdev->family >= CHIP_R300) &&
1764 ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
1765 *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1766
1584 /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, 1767 /* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
1585 one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */ 1768 one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
1586 if (dev->pdev->device == 0x515e && 1769 if (dev->pdev->device == 0x515e &&
@@ -1624,6 +1807,12 @@ static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev)
1624 dev->pdev->subsystem_device == 0x280a) 1807 dev->pdev->subsystem_device == 0x280a)
1625 return false; 1808 return false;
1626 1809
1810 /* MSI S270 has non-existent TV port */
1811 if (dev->pdev->device == 0x5955 &&
1812 dev->pdev->subsystem_vendor == 0x1462 &&
1813 dev->pdev->subsystem_device == 0x0131)
1814 return false;
1815
1627 return true; 1816 return true;
1628} 1817}
1629 1818
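
[editor's note] The TV quirks key purely on PCI ids; the new entry blacklists the MSI S270 (device 0x5955, subsystem 1462:0131), whose BIOS advertises a TV port the board does not have. As the if-chain grows, a table is one plausible shape for it, sketched here rather than taken from the patch:

    static const struct {
            u16 device, sub_vendor, sub_device;
    } tv_quirk_list[] = {
            { 0x5955, 0x1462, 0x0131 },  /* MSI S270: non-existent TV port */
            /* earlier entries from the if-chain would join here */
    };

    static bool tv_port_present(struct pci_dev *pdev)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(tv_quirk_list); i++)
                    if (pdev->device == tv_quirk_list[i].device &&
                        pdev->subsystem_vendor == tv_quirk_list[i].sub_vendor &&
                        pdev->subsystem_device == tv_quirk_list[i].sub_device)
                            return false;
            return true;
    }
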
@@ -1671,6 +1860,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1671 enum radeon_combios_connector connector; 1860 enum radeon_combios_connector connector;
1672 int i = 0; 1861 int i = 0;
1673 struct radeon_i2c_bus_rec ddc_i2c; 1862 struct radeon_i2c_bus_rec ddc_i2c;
1863 struct radeon_hpd hpd;
1674 1864
1675 if (rdev->bios == NULL) 1865 if (rdev->bios == NULL)
1676 return false; 1866 return false;
@@ -1691,26 +1881,40 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1691 switch (ddc_type) { 1881 switch (ddc_type) {
1692 case DDC_MONID: 1882 case DDC_MONID:
1693 ddc_i2c = 1883 ddc_i2c =
1694 combios_setup_i2c_bus(RADEON_GPIO_MONID); 1884 combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
1695 break; 1885 break;
1696 case DDC_DVI: 1886 case DDC_DVI:
1697 ddc_i2c = 1887 ddc_i2c =
1698 combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 1888 combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
1699 break; 1889 break;
1700 case DDC_VGA: 1890 case DDC_VGA:
1701 ddc_i2c = 1891 ddc_i2c =
1702 combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 1892 combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
1703 break; 1893 break;
1704 case DDC_CRT2: 1894 case DDC_CRT2:
1705 ddc_i2c = 1895 ddc_i2c =
1706 combios_setup_i2c_bus(RADEON_GPIO_CRT2_DDC); 1896 combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
1707 break; 1897 break;
1708 default: 1898 default:
1709 break; 1899 break;
1710 } 1900 }
1711 1901
1902 switch (connector) {
1903 case CONNECTOR_PROPRIETARY_LEGACY:
1904 case CONNECTOR_DVI_I_LEGACY:
1905 case CONNECTOR_DVI_D_LEGACY:
1906 if ((tmp >> 4) & 0x1)
1907 hpd.hpd = RADEON_HPD_2;
1908 else
1909 hpd.hpd = RADEON_HPD_1;
1910 break;
1911 default:
1912 hpd.hpd = RADEON_HPD_NONE;
1913 break;
1914 }
1915
1712 if (!radeon_apply_legacy_quirks(dev, i, &connector, 1916 if (!radeon_apply_legacy_quirks(dev, i, &connector,
1713 &ddc_i2c)) 1917 &ddc_i2c, &hpd))
1714 continue; 1918 continue;
1715 1919
1716 switch (connector) { 1920 switch (connector) {
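
[editor's note] For the legacy BIOS connector table, the HPD pin is inferred from the per-connector word: on the DVI-class connector types, bit 4 of tmp selects the second pin, and every other type gets no HPD. Condensed from the switch above (is_dvi_class stands in for the three CONNECTOR_*_LEGACY cases):

    if (is_dvi_class(connector))
            hpd.hpd = ((tmp >> 4) & 0x1) ? RADEON_HPD_2 : RADEON_HPD_1;
    else
            hpd.hpd = RADEON_HPD_NONE;
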
@@ -1727,7 +1931,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1727 legacy_connector_convert 1931 legacy_connector_convert
1728 [connector], 1932 [connector],
1729 &ddc_i2c, 1933 &ddc_i2c,
1730 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D); 1934 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
1935 &hpd);
1731 break; 1936 break;
1732 case CONNECTOR_CRT_LEGACY: 1937 case CONNECTOR_CRT_LEGACY:
1733 if (tmp & 0x1) { 1938 if (tmp & 0x1) {
@@ -1753,7 +1958,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1753 legacy_connector_convert 1958 legacy_connector_convert
1754 [connector], 1959 [connector],
1755 &ddc_i2c, 1960 &ddc_i2c,
1756 CONNECTOR_OBJECT_ID_VGA); 1961 CONNECTOR_OBJECT_ID_VGA,
1962 &hpd);
1757 break; 1963 break;
1758 case CONNECTOR_DVI_I_LEGACY: 1964 case CONNECTOR_DVI_I_LEGACY:
1759 devices = 0; 1965 devices = 0;
@@ -1799,7 +2005,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1799 legacy_connector_convert 2005 legacy_connector_convert
1800 [connector], 2006 [connector],
1801 &ddc_i2c, 2007 &ddc_i2c,
1802 connector_object_id); 2008 connector_object_id,
2009 &hpd);
1803 break; 2010 break;
1804 case CONNECTOR_DVI_D_LEGACY: 2011 case CONNECTOR_DVI_D_LEGACY:
1805 if ((tmp >> 4) & 0x1) { 2012 if ((tmp >> 4) & 0x1) {
@@ -1817,7 +2024,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1817 legacy_connector_convert 2024 legacy_connector_convert
1818 [connector], 2025 [connector],
1819 &ddc_i2c, 2026 &ddc_i2c,
1820 connector_object_id); 2027 connector_object_id,
2028 &hpd);
1821 break; 2029 break;
1822 case CONNECTOR_CTV_LEGACY: 2030 case CONNECTOR_CTV_LEGACY:
1823 case CONNECTOR_STV_LEGACY: 2031 case CONNECTOR_STV_LEGACY:
@@ -1832,7 +2040,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1832 legacy_connector_convert 2040 legacy_connector_convert
1833 [connector], 2041 [connector],
1834 &ddc_i2c, 2042 &ddc_i2c,
1835 CONNECTOR_OBJECT_ID_SVIDEO); 2043 CONNECTOR_OBJECT_ID_SVIDEO,
2044 &hpd);
1836 break; 2045 break;
1837 default: 2046 default:
1838 DRM_ERROR("Unknown connector type: %d\n", 2047 DRM_ERROR("Unknown connector type: %d\n",
@@ -1858,14 +2067,16 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1858 0), 2067 0),
1859 ATOM_DEVICE_DFP1_SUPPORT); 2068 ATOM_DEVICE_DFP1_SUPPORT);
1860 2069
1861 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_DVI_DDC); 2070 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
2071 hpd.hpd = RADEON_HPD_NONE;
1862 radeon_add_legacy_connector(dev, 2072 radeon_add_legacy_connector(dev,
1863 0, 2073 0,
1864 ATOM_DEVICE_CRT1_SUPPORT | 2074 ATOM_DEVICE_CRT1_SUPPORT |
1865 ATOM_DEVICE_DFP1_SUPPORT, 2075 ATOM_DEVICE_DFP1_SUPPORT,
1866 DRM_MODE_CONNECTOR_DVII, 2076 DRM_MODE_CONNECTOR_DVII,
1867 &ddc_i2c, 2077 &ddc_i2c,
1868 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I); 2078 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2079 &hpd);
1869 } else { 2080 } else {
1870 uint16_t crt_info = 2081 uint16_t crt_info =
1871 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); 2082 combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
@@ -1876,13 +2087,15 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
1876 ATOM_DEVICE_CRT1_SUPPORT, 2087 ATOM_DEVICE_CRT1_SUPPORT,
1877 1), 2088 1),
1878 ATOM_DEVICE_CRT1_SUPPORT); 2089 ATOM_DEVICE_CRT1_SUPPORT);
1879 ddc_i2c = combios_setup_i2c_bus(RADEON_GPIO_VGA_DDC); 2090 ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
2091 hpd.hpd = RADEON_HPD_NONE;
1880 radeon_add_legacy_connector(dev, 2092 radeon_add_legacy_connector(dev,
1881 0, 2093 0,
1882 ATOM_DEVICE_CRT1_SUPPORT, 2094 ATOM_DEVICE_CRT1_SUPPORT,
1883 DRM_MODE_CONNECTOR_VGA, 2095 DRM_MODE_CONNECTOR_VGA,
1884 &ddc_i2c, 2096 &ddc_i2c,
1885 CONNECTOR_OBJECT_ID_VGA); 2097 CONNECTOR_OBJECT_ID_VGA,
2098 &hpd);
1886 } else { 2099 } else {
1887 DRM_DEBUG("No connector info found\n"); 2100 DRM_DEBUG("No connector info found\n");
1888 return false; 2101 return false;
@@ -1910,27 +2123,27 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			case DDC_MONID:
 				ddc_i2c =
 				    combios_setup_i2c_bus
-				    (RADEON_GPIO_MONID);
+				    (rdev, RADEON_GPIO_MONID);
 				break;
 			case DDC_DVI:
 				ddc_i2c =
 				    combios_setup_i2c_bus
-				    (RADEON_GPIO_DVI_DDC);
+				    (rdev, RADEON_GPIO_DVI_DDC);
 				break;
 			case DDC_VGA:
 				ddc_i2c =
 				    combios_setup_i2c_bus
-				    (RADEON_GPIO_VGA_DDC);
+				    (rdev, RADEON_GPIO_VGA_DDC);
 				break;
 			case DDC_CRT2:
 				ddc_i2c =
 				    combios_setup_i2c_bus
-				    (RADEON_GPIO_CRT2_DDC);
+				    (rdev, RADEON_GPIO_CRT2_DDC);
 				break;
 			case DDC_LCD:
 				ddc_i2c =
 				    combios_setup_i2c_bus
-				    (RADEON_LCD_GPIO_MASK);
+				    (rdev, RADEON_GPIOPAD_MASK);
 				ddc_i2c.mask_clk_mask =
 				    RBIOS32(lcd_ddc_info + 3);
 				ddc_i2c.mask_data_mask =
@@ -1939,19 +2152,19 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 				    RBIOS32(lcd_ddc_info + 3);
 				ddc_i2c.a_data_mask =
 				    RBIOS32(lcd_ddc_info + 7);
-				ddc_i2c.put_clk_mask =
+				ddc_i2c.en_clk_mask =
 				    RBIOS32(lcd_ddc_info + 3);
-				ddc_i2c.put_data_mask =
+				ddc_i2c.en_data_mask =
 				    RBIOS32(lcd_ddc_info + 7);
-				ddc_i2c.get_clk_mask =
+				ddc_i2c.y_clk_mask =
 				    RBIOS32(lcd_ddc_info + 3);
-				ddc_i2c.get_data_mask =
+				ddc_i2c.y_data_mask =
 				    RBIOS32(lcd_ddc_info + 7);
 				break;
 			case DDC_GPIO:
 				ddc_i2c =
 				    combios_setup_i2c_bus
-				    (RADEON_MDGPIO_EN_REG);
+				    (rdev, RADEON_MDGPIO_MASK);
 				ddc_i2c.mask_clk_mask =
 				    RBIOS32(lcd_ddc_info + 3);
 				ddc_i2c.mask_data_mask =
@@ -1960,13 +2173,13 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 				    RBIOS32(lcd_ddc_info + 3);
 				ddc_i2c.a_data_mask =
 				    RBIOS32(lcd_ddc_info + 7);
-				ddc_i2c.put_clk_mask =
+				ddc_i2c.en_clk_mask =
 				    RBIOS32(lcd_ddc_info + 3);
-				ddc_i2c.put_data_mask =
+				ddc_i2c.en_data_mask =
 				    RBIOS32(lcd_ddc_info + 7);
-				ddc_i2c.get_clk_mask =
+				ddc_i2c.y_clk_mask =
 				    RBIOS32(lcd_ddc_info + 3);
-				ddc_i2c.get_data_mask =
+				ddc_i2c.y_data_mask =
 				    RBIOS32(lcd_ddc_info + 7);
 				break;
 			default:
@@ -1977,12 +2190,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 			} else
 				ddc_i2c.valid = false;
 
+			hpd.hpd = RADEON_HPD_NONE;
 			radeon_add_legacy_connector(dev,
 						    5,
 						    ATOM_DEVICE_LCD1_SUPPORT,
 						    DRM_MODE_CONNECTOR_LVDS,
 						    &ddc_i2c,
-						    CONNECTOR_OBJECT_ID_LVDS);
+						    CONNECTOR_OBJECT_ID_LVDS,
+						    &hpd);
 		}
 	}
 
@@ -1993,6 +2208,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 		if (tv_info) {
 			if (RBIOS8(tv_info + 6) == 'T') {
 				if (radeon_apply_legacy_tv_quirks(dev)) {
+					hpd.hpd = RADEON_HPD_NONE;
 					radeon_add_legacy_encoder(dev,
 								  radeon_get_encoder_id
 								  (dev,
@@ -2003,7 +2219,8 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 								    ATOM_DEVICE_TV1_SUPPORT,
 								    DRM_MODE_CONNECTOR_SVIDEO,
 								    &ddc_i2c,
-								    CONNECTOR_OBJECT_ID_SVIDEO);
+								    CONNECTOR_OBJECT_ID_SVIDEO,
+								    &hpd);
 				}
 			}
 		}
@@ -2014,6 +2231,193 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
 	return true;
 }
 
+void radeon_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (!tmds)
+		return;
+
+	switch (tmds->dvo_chip) {
+	case DVO_SIL164:
+		/* sil 164 */
+		radeon_i2c_do_lock(tmds->i2c_bus, 1);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x08, 0x30);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x09, 0x00);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x0a, 0x90);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x0c, 0x89);
+		radeon_i2c_sw_put_byte(tmds->i2c_bus,
+				       tmds->slave_addr,
+				       0x08, 0x3b);
+		radeon_i2c_do_lock(tmds->i2c_bus, 0);
+		break;
+	case DVO_SIL1178:
+		/* sil 1178 - untested */
+		/*
+		 * 0x0f, 0x44
+		 * 0x0f, 0x4c
+		 * 0x0e, 0x01
+		 * 0x0a, 0x80
+		 * 0x09, 0x30
+		 * 0x0c, 0xc9
+		 * 0x0d, 0x70
+		 * 0x08, 0x32
+		 * 0x08, 0x33
+		 */
+		break;
+	default:
+		break;
+	}
+
+}
+
+bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	uint16_t offset;
+	uint8_t blocks, slave_addr, rev;
+	uint32_t index, id;
+	uint32_t reg, val, and_mask, or_mask;
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+
+	if (rdev->bios == NULL)
+		return false;
+
+	if (!tmds)
+		return false;
+
+	if (rdev->flags & RADEON_IS_IGP) {
+		offset = combios_get_table_offset(dev, COMBIOS_TMDS_POWER_ON_TABLE);
+		if (offset) {
+			rev = RBIOS8(offset);
+			if (rev > 1) {
+				blocks = RBIOS8(offset + 3);
+				index = offset + 4;
+				while (blocks > 0) {
+					id = RBIOS16(index);
+					index += 2;
+					switch (id >> 13) {
+					case 0:
+						reg = (id & 0x1fff) * 4;
+						val = RBIOS32(index);
+						index += 4;
+						WREG32(reg, val);
+						break;
+					case 2:
+						reg = (id & 0x1fff) * 4;
+						and_mask = RBIOS32(index);
+						index += 4;
+						or_mask = RBIOS32(index);
+						index += 4;
+						val = RREG32(reg);
+						val = (val & and_mask) | or_mask;
+						WREG32(reg, val);
+						break;
+					case 3:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val);
+						break;
+					case 4:
+						val = RBIOS16(index);
+						index += 2;
+						udelay(val * 1000);
+						break;
+					case 6:
+						slave_addr = id & 0xff;
+						slave_addr >>= 1; /* 7 bit addressing */
+						index++;
+						reg = RBIOS8(index);
+						index++;
+						val = RBIOS8(index);
+						index++;
+						radeon_i2c_do_lock(tmds->i2c_bus, 1);
+						radeon_i2c_sw_put_byte(tmds->i2c_bus,
+								       slave_addr,
+								       reg, val);
+						radeon_i2c_do_lock(tmds->i2c_bus, 0);
+						break;
+					default:
+						DRM_ERROR("Unknown id %d\n", id >> 13);
+						break;
+					}
+					blocks--;
+				}
+				return true;
+			}
+		}
+	} else {
+		offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
+		if (offset) {
+			index = offset + 10;
+			id = RBIOS16(index);
+			while (id != 0xffff) {
+				index += 2;
+				switch (id >> 13) {
+				case 0:
+					reg = (id & 0x1fff) * 4;
+					val = RBIOS32(index);
+					index += 4;
+					WREG32(reg, val);
+					break;
+				case 2:
+					reg = (id & 0x1fff) * 4;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32(reg, val);
+					break;
+				case 4:
+					val = RBIOS16(index);
+					index += 2;
+					udelay(val);
+					break;
+				case 5:
+					reg = id & 0x1fff;
+					and_mask = RBIOS32(index);
+					index += 4;
+					or_mask = RBIOS32(index);
+					index += 4;
+					val = RREG32_PLL(reg);
+					val = (val & and_mask) | or_mask;
+					WREG32_PLL(reg, val);
+					break;
+				case 6:
+					reg = id & 0x1fff;
+					val = RBIOS8(index);
+					index += 1;
+					radeon_i2c_do_lock(tmds->i2c_bus, 1);
+					radeon_i2c_sw_put_byte(tmds->i2c_bus,
+							       tmds->slave_addr,
+							       reg, val);
+					radeon_i2c_do_lock(tmds->i2c_bus, 0);
+					break;
+				default:
+					DRM_ERROR("Unknown id %d\n", id >> 13);
+					break;
+				}
+				id = RBIOS16(index);
+			}
+			return true;
+		}
+	}
+	return false;
+}
+
 static void combios_parse_mmio_table(struct drm_device *dev, uint16_t offset)
 {
 	struct radeon_device *rdev = dev->dev_private;
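
Note on the table format parsed above: each 16-bit entry packs an opcode in the top three bits and a register index in the low 13 bits, and 0xffff terminates the list. The same decode loop, lifted out of kernel context as a standalone C sketch (read16/read32/write_reg are hypothetical stand-ins for the RBIOS16/RBIOS32/WREG32 macros, and only the two register opcodes are handled):

	#include <stdint.h>
	#include <stdio.h>

	static const uint8_t *bios;	/* hypothetical BIOS image, little endian */

	static uint16_t read16(uint32_t i) { return bios[i] | (bios[i + 1] << 8); }
	static uint32_t read32(uint32_t i) { return read16(i) | ((uint32_t)read16(i + 2) << 16); }
	static void write_reg(uint32_t r, uint32_t v) { printf("WREG32(0x%04x, 0x%08x)\n", r, v); }

	static void walk_table(const uint8_t *img, uint32_t index)
	{
		bios = img;
		for (uint16_t id = read16(index); id != 0xffff; id = read16(index)) {
			uint32_t reg = (id & 0x1fff) * 4;	/* low 13 bits: dword offset */
			index += 2;
			switch (id >> 13) {			/* top 3 bits: opcode */
			case 0:					/* immediate register write */
				write_reg(reg, read32(index));
				index += 4;
				break;
			case 2: {				/* and/or read-modify-write */
				uint32_t and_mask = read32(index);
				uint32_t or_mask = read32(index + 4);
				index += 8;
				write_reg(reg, (0u & and_mask) | or_mask); /* no RREG32 in this sketch */
				break;
			}
			default:	/* delays and i2c writes elided here */
				return;
			}
		}
	}
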
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 29763ceae3af..5eece186e03c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
 					       struct drm_encoder *encoder,
 					       bool connected);
 
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			if (radeon_dp_needs_link_train(radeon_connector)) {
+				if (connector->encoder)
+					dp_link_train(connector->encoder, connector);
+			}
+		}
+	}
+
+}
+
 static void radeon_property_change_mode(struct drm_encoder *encoder)
 {
 	struct drm_crtc *crtc = encoder->crtc;
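
Note: radeon_connector_hotplug() re-arms the HPD pin polarity and, for DisplayPort, retrains the link if the sink reports it needs it. How it gets invoked is outside this hunk; a plausible caller, sketched purely as an assumption (the hotplug_work member and the wiring are hypothetical here), is a work function scheduled from the HPD interrupt that walks the connector list and then lets the DRM helper re-probe:

	/* sketch only; not part of this diff */
	static void hotplug_work_func(struct work_struct *work)
	{
		struct radeon_device *rdev = container_of(work, struct radeon_device,
							  hotplug_work);
		struct drm_device *dev = rdev->ddev;
		struct drm_connector *connector;

		/* re-arm polarity / retrain DP on every connector with HPD */
		list_for_each_entry(connector, &dev->mode_config.connector_list, head)
			radeon_connector_hotplug(connector);
		/* then re-detect and send the uevent */
		drm_helper_hpd_irq_event(dev);
	}
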
@@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
 		ret = connector_status_connected;
 	else {
 		if (radeon_connector->ddc_bus) {
-			radeon_i2c_do_lock(radeon_connector, 1);
+			radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
 							      &radeon_connector->ddc_bus->adapter);
-			radeon_i2c_do_lock(radeon_connector, 0);
+			radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 			if (radeon_connector->edid)
 				ret = connector_status_connected;
 		}
@@ -553,17 +573,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
 	if (!encoder)
 		ret = connector_status_disconnected;
 
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
 			radeon_connector->edid = NULL;
 		}
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +728,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret;
 
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
 			radeon_connector->edid = NULL;
 		}
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 			ret = connector_status_disconnected;
 		} else
 			ret = connector_status_connected;
+
+		/* multiple connectors on the same encoder with the same ddc line
+		 * This tends to be HDMI and DVI on the same encoder with the
+		 * same ddc line. If the edid says HDMI, consider the HDMI port
+		 * connected and the DVI port disconnected. If the edid doesn't
+		 * say HDMI, vice versa.
+		 */
+		if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
+			struct drm_device *dev = connector->dev;
+			struct drm_connector *list_connector;
+			struct radeon_connector *list_radeon_connector;
+			list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+				if (connector == list_connector)
+					continue;
+				list_radeon_connector = to_radeon_connector(list_connector);
+				if (radeon_connector->devices == list_radeon_connector->devices) {
+					if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+						if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+							kfree(radeon_connector->edid);
+							radeon_connector->edid = NULL;
+							ret = connector_status_disconnected;
+						}
+					} else {
+						if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+						    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+							kfree(radeon_connector->edid);
+							radeon_connector->edid = NULL;
+							ret = connector_status_disconnected;
+						}
+					}
+				}
+			}
+		}
 		}
 	}
 
@@ -863,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
 	.force = radeon_dvi_force,
 };
 
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	if (radeon_connector->ddc_bus)
+		radeon_i2c_destroy(radeon_connector->ddc_bus);
+	if (radeon_connector->edid)
+		kfree(radeon_connector->edid);
+	if (radeon_dig_connector->dp_i2c_bus)
+		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+	kfree(radeon_connector->con_priv);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_ddc_get_modes(radeon_connector);
+	return ret;
+}
+
+static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	u8 sink_type;
+
+	if (radeon_connector->edid) {
+		kfree(radeon_connector->edid);
+		radeon_connector->edid = NULL;
+	}
+
+	sink_type = radeon_dp_getsinktype(radeon_connector);
+	if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+		if (radeon_dp_getdpcd(radeon_connector)) {
+			radeon_dig_connector->dp_sink_type = sink_type;
+			ret = connector_status_connected;
+		}
+	} else {
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+		if (radeon_ddc_probe(radeon_connector)) {
+			radeon_dig_connector->dp_sink_type = sink_type;
+			ret = connector_status_connected;
+		}
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	}
+
+	return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+				struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+		return radeon_dp_mode_valid_helper(radeon_connector, mode);
+	else
+		return MODE_OK;
+}
+
+struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+	.get_modes = radeon_dp_get_modes,
+	.mode_valid = radeon_dp_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
 void
 radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t connector_id,
@@ -871,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 			  struct radeon_i2c_bus_rec *i2c_bus,
 			  bool linkb,
 			  uint32_t igp_lane_info,
-			  uint16_t connector_object_id)
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
@@ -911,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	radeon_connector->devices = supported_device;
 	radeon_connector->shared_ddc = shared_ddc;
 	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.coherent_mode_property,
 					      1);
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
 	case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1139,23 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (ret)
 			goto failed;
 		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				goto failed;
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.coherent_mode_property,
+					      1);
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1169,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      1);
 		}
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
-		drm_mode_create_scaling_mode_property(dev);
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1214,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 			    uint32_t supported_device,
 			    int connector_type,
 			    struct radeon_i2c_bus_rec *i2c_bus,
-			    uint16_t connector_object_id)
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
@@ -1093,6 +1245,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 	radeon_connector->connector_id = connector_id;
 	radeon_connector->devices = supported_device;
 	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1313,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      1);
 		}
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 4f7afc79dd82..0b2f9c2ad2c1 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 	for (t = 0; t < dev_priv->usec_timeout; t++) {
 		u32 done_age = GET_SCRATCH(dev_priv, 1);
 		DRM_DEBUG("done_age = %d\n", done_age);
-		for (i = start; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[start];
 			buf_priv = buf->dev_private;
 			if (buf->file_priv == NULL || (buf->pending &&
 						       buf_priv->age <=
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 				buf->pending = 0;
 				return buf;
 			}
-			start = 0;
+			if (++start >= dma->buf_count)
+				start = 0;
 		}
 
 		if (t) {
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 		}
 	}
 
-	DRM_DEBUG("returning NULL!\n");
 	return NULL;
 }
 
-#if 0
-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
-{
-	struct drm_device_dma *dma = dev->dma;
-	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_radeon_buf_priv_t *buf_priv;
-	struct drm_buf *buf;
-	int i, t;
-	int start;
-	u32 done_age;
-
-	done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
-	if (++dev_priv->last_buf >= dma->buf_count)
-		dev_priv->last_buf = 0;
-
-	start = dev_priv->last_buf;
-	dev_priv->stats.freelist_loops++;
-
-	for (t = 0; t < 2; t++) {
-		for (i = start; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
-			buf_priv = buf->dev_private;
-			if (buf->file_priv == 0 || (buf->pending &&
-						    buf_priv->age <=
-						    done_age)) {
-				dev_priv->stats.requested_bufs++;
-				buf->pending = 0;
-				return buf;
-			}
-		}
-		start = 0;
-	}
-
-	return NULL;
-}
-#endif
-
 void radeon_freelist_reset(struct drm_device * dev)
 {
 	struct drm_device_dma *dma = dev->dma;
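
Note: the freelist change above fixes a scan bug rather than just shuffling code. The old inner loop ran i from start to buf_count, so buffers below the starting index were never examined during the first timeout pass, and later passes simply restarted at zero, losing the round-robin ordering. The new loop always performs buf_count iterations, starting at start and wrapping the index by hand. The pattern in isolation (standalone C; is_free is a hypothetical predicate):

	/* visit every slot exactly once per pass, starting at 'start' */
	static int find_free(int (*is_free)(int), int buf_count, int start)
	{
		int i;

		for (i = 0; i < buf_count; i++) {
			if (is_free(start))
				return start;
			if (++start >= buf_count)
				start = 0;
		}
		return -1;	/* nothing free in this pass */
	}
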
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5ab2cf96a264..65590a0f1d93 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 		}
 		p->relocs_ptr[i] = &p->relocs[i];
 		p->relocs[i].robj = p->relocs[i].gobj->driver_private;
-		p->relocs[i].lobj.robj = p->relocs[i].robj;
+		p->relocs[i].lobj.bo = p->relocs[i].robj;
 		p->relocs[i].lobj.rdomain = r->read_domains;
 		p->relocs[i].lobj.wdomain = r->write_domain;
 		p->relocs[i].handle = r->handle;
 		p->relocs[i].flags = r->flags;
 		INIT_LIST_HEAD(&p->relocs[i].lobj.list);
-		radeon_object_list_add_object(&p->relocs[i].lobj,
-					      &p->validated);
+		radeon_bo_list_add_object(&p->relocs[i].lobj,
+					  &p->validated);
 		}
 	}
-	return radeon_object_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated, p->ib->fence);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	unsigned i;
 
 	if (error) {
-		radeon_object_list_unvalidate(&parser->validated);
+		radeon_bo_list_unvalidate(&parser->validated,
+					  parser->ib->fence);
 	} else {
-		radeon_object_list_clean(&parser->validated);
+		radeon_bo_list_unreserve(&parser->validated);
 	}
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 41bb76fbe734..02bcdb1240c0 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
 	if (rdev->family < CHIP_R600) {
 		int i;
 
-		for (i = 0; i < 8; i++) {
-			WREG32(RADEON_SURFACE0_INFO +
-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-			       0);
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
 		}
 		/* enable surfaces */
 		WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
 
 }
 
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
 int radeon_dummy_page_init(struct radeon_device *rdev)
 {
 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
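
Note: radeon_boot_test_post_card() folds the "has the GPU been POSTed, and if not, can we POST it from the BIOS tables" check into one helper, so the per-ASIC paths no longer open-code radeon_card_posted() plus the atom/combios init calls. A typical call site, sketched here as an assumption about the ASIC init/resume paths rather than quoted from this patch:

	/* sketch: early in an ASIC init or resume path */
	if (!radeon_boot_test_post_card(rdev))
		return -EINVAL;	/* not POSTed and no BIOS to POST from */
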
@@ -463,12 +482,16 @@ int radeon_atombios_init(struct radeon_device *rdev)
 
 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
 	return 0;
 }
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
-	kfree(rdev->mode_info.atom_context);
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context);
+	}
 	kfree(rdev->mode_info.atom_card_info);
 }
 
@@ -544,16 +567,24 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->cp.mutex);
+	if (rdev->family >= CHIP_R600)
+		spin_lock_init(&rdev->ih.lock);
+	mutex_init(&rdev->gem.mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
 
+	/* setup workqueue */
+	rdev->wq = create_workqueue("radeon");
+	if (rdev->wq == NULL)
+		return -ENOMEM;
+
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r) {
 		return r;
 	}
 
-	if (radeon_agpmode == -1) {
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 		radeon_agp_disable(rdev);
 	}
 
@@ -620,6 +651,7 @@ void radeon_device_fini(struct radeon_device *rdev)
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
 	radeon_fini(rdev);
+	destroy_workqueue(rdev->wq);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	iounmap(rdev->rmmio);
 	rdev->rmmio = NULL;
@@ -633,6 +665,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *crtc;
+	int r;
 
 	if (dev == NULL || rdev == NULL) {
 		return -ENODEV;
@@ -643,26 +676,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
-		struct radeon_object *robj;
+		struct radeon_bo *robj;
 
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
 		robj = rfb->obj->driver_private;
-		if (robj != rdev->fbdev_robj) {
-			radeon_object_unpin(robj);
+		if (robj != rdev->fbdev_rbo) {
+			r = radeon_bo_reserve(robj, false);
+			if (unlikely(r == 0)) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
 		}
 	}
 	/* evict vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);
 
 	radeon_save_bios_scratch_regs(rdev);
 
 	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
 	/* evict remaining vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +733,8 @@ int radeon_resume_kms(struct drm_device *dev)
 	fb_set_suspend(rdev->fbdev_info, 0);
 	release_console_sem();
 
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
 	/* blat the mode back in */
 	drm_helper_resume_force_mode(dev);
 	return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c85df4afcb7a..a133b833e45d 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -250,6 +250,16 @@ static const char *connector_names[13] = {
 	"HDMI-B",
 };
 
+static const char *hpd_names[7] = {
+	"NONE",
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
 static void radeon_print_display_setup(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev)
 		radeon_connector = to_radeon_connector(connector);
 		DRM_INFO("Connector %d:\n", i);
 		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
 		if (radeon_connector->ddc_bus)
 			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
 				 radeon_connector->ddc_bus->rec.mask_data_reg,
 				 radeon_connector->ddc_bus->rec.a_clk_reg,
 				 radeon_connector->ddc_bus->rec.a_data_reg,
-				 radeon_connector->ddc_bus->rec.put_clk_reg,
-				 radeon_connector->ddc_bus->rec.put_data_reg,
-				 radeon_connector->ddc_bus->rec.get_clk_reg,
-				 radeon_connector->ddc_bus->rec.get_data_reg);
+				 radeon_connector->ddc_bus->rec.en_clk_reg,
+				 radeon_connector->ddc_bus->rec.en_data_reg,
+				 radeon_connector->ddc_bus->rec.y_clk_reg,
+				 radeon_connector->ddc_bus->rec.y_data_reg);
 		DRM_INFO("  Encoders:\n");
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
 		ret = radeon_get_legacy_connector_info_from_table(dev);
 	}
 	if (ret) {
+		radeon_setup_encoder_clones(dev);
 		radeon_print_display_setup(dev);
 		list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
 			radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 {
 	int ret = 0;
 
+	if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+		if (dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+	}
 	if (!radeon_connector->ddc_bus)
 		return -1;
 	if (!radeon_connector->edid) {
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	}
 
 	if (radeon_connector->edid) {
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (edid) {
 		kfree(edid);
 	}
@@ -542,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
 	*post_div_p = best_post_div;
 }
 
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      uint64_t freq,
+			      uint32_t *dot_clock_p,
+			      uint32_t *fb_div_p,
+			      uint32_t *frac_fb_div_p,
+			      uint32_t *ref_div_p,
+			      uint32_t *post_div_p,
+			      int flags)
+{
+	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+	fixed20_12 pll_out_max, pll_out_min;
+	fixed20_12 pll_in_max, pll_in_min;
+	fixed20_12 reference_freq;
+	fixed20_12 error, ffreq, a, b;
+
+	pll_out_max.full = rfixed_const(pll->pll_out_max);
+	pll_out_min.full = rfixed_const(pll->pll_out_min);
+	pll_in_max.full = rfixed_const(pll->pll_in_max);
+	pll_in_min.full = rfixed_const(pll->pll_in_min);
+	reference_freq.full = rfixed_const(pll->reference_freq);
+	do_div(freq, 10);
+	ffreq.full = rfixed_const(freq);
+	error.full = rfixed_const(100 * 100);
+
+	/* max p */
+	p.full = rfixed_div(pll_out_max, ffreq);
+	p.full = rfixed_floor(p);
+
+	/* min m */
+	m.full = rfixed_div(reference_freq, pll_in_max);
+	m.full = rfixed_ceil(m);
+
+	while (1) {
+		n.full = rfixed_div(ffreq, reference_freq);
+		n.full = rfixed_mul(n, m);
+		n.full = rfixed_mul(n, p);
+
+		f_vco.full = rfixed_div(n, m);
+		f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+		f_pclk.full = rfixed_div(f_vco, p);
+
+		if (f_pclk.full > ffreq.full)
+			error.full = f_pclk.full - ffreq.full;
+		else
+			error.full = ffreq.full - f_pclk.full;
+		error.full = rfixed_div(error, f_pclk);
+		a.full = rfixed_const(100 * 100);
+		error.full = rfixed_mul(error, a);
+
+		a.full = rfixed_mul(m, p);
+		a.full = rfixed_div(n, a);
+		best_freq.full = rfixed_mul(reference_freq, a);
+
+		if (rfixed_trunc(error) < 25)
+			break;
+
+		a.full = rfixed_const(1);
+		m.full = m.full + a.full;
+		a.full = rfixed_div(reference_freq, m);
+		if (a.full >= pll_in_min.full)
+			continue;
+
+		m.full = rfixed_div(reference_freq, pll_in_max);
+		m.full = rfixed_ceil(m);
+		a.full = rfixed_const(1);
+		p.full = p.full - a.full;
+		a.full = rfixed_mul(p, ffreq);
+		if (a.full >= pll_out_min.full)
+			continue;
+		else {
+			DRM_ERROR("Unable to find pll dividers\n");
+			break;
+		}
+	}
+
+	a.full = rfixed_const(10);
+	b.full = rfixed_mul(n, a);
+
+	frac_n.full = rfixed_floor(n);
+	frac_n.full = rfixed_mul(frac_n, a);
+	frac_n.full = b.full - frac_n.full;
+
+	*dot_clock_p = rfixed_trunc(best_freq);
+	*fb_div_p = rfixed_trunc(n);
+	*frac_fb_div_p = rfixed_trunc(frac_n);
+	*ref_div_p = rfixed_trunc(m);
+	*post_div_p = rfixed_trunc(p);
+
+	DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
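
Note: the divider search above runs entirely in 20.12 fixed point, but the relation it converges on is simple: f_vco = ref * n / m and f_pclk = f_vco / p. The loop accepts the first combination whose relative error lands under 25 parts in 10000 (0.25%), otherwise it bumps the reference divider m, and once ref/m would fall below the PLL input minimum it resets m and decrements the post divider p instead. The feedback divider keeps one fractional decimal digit, which is what the n*10 arithmetic at the end extracts. A back-of-the-envelope check of the relation in plain integer C (the numbers are illustrative, not from a real VBIOS):

	#include <stdio.h>

	int main(void)
	{
		/* illustrative: 27 MHz reference, 108.0 MHz target pixel clock */
		unsigned long long ref_khz = 27000, target_khz = 108000;
		unsigned long long ref_div = 2, post_div = 10;

		/* feedback divider in tenths: fb = target * m * p / ref */
		unsigned long long fb_tenths =
			target_khz * ref_div * post_div * 10 / ref_khz;
		unsigned long long vco_khz = ref_khz * fb_tenths / (ref_div * 10);

		printf("fb_div = %llu.%llu, vco = %llu kHz, pclk = %llu kHz\n",
		       fb_tenths / 10, fb_tenths % 10, vco_khz, vco_khz / post_div);
		return 0;	/* fb_div = 80.0, vco = 1080000 kHz, pclk = 108000 kHz */
	}
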
@@ -642,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 			return -ENOMEM;
 
 		rdev->mode_info.coherent_mode_property->values[0] = 0;
-		rdev->mode_info.coherent_mode_property->values[0] = 1;
+		rdev->mode_info.coherent_mode_property->values[1] = 1;
 	}
 
 	if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 	if (!rdev->mode_info.load_detect_property)
 		return -ENOMEM;
 	rdev->mode_info.load_detect_property->values[0] = 0;
-	rdev->mode_info.load_detect_property->values[0] = 1;
+	rdev->mode_info.load_detect_property->values[1] = 1;
 
 	drm_mode_create_scaling_mode_property(rdev->ddev);
 
@@ -723,6 +833,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
 	if (!ret) {
 		return ret;
 	}
+	/* initialize hpd */
+	radeon_hpd_init(rdev);
 	drm_helper_initial_config(rdev->ddev);
 	return 0;
 }
@@ -730,6 +842,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
 	if (rdev->mode_info.mode_config_initialized) {
+		radeon_hpd_fini(rdev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}
@@ -750,9 +863,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 		if (encoder->crtc != crtc)
 			continue;
 		if (first) {
-			radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			/* set scaling */
+			if (radeon_encoder->rmx_type == RMX_OFF)
+				radeon_crtc->rmx_type = RMX_OFF;
+			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			else
+				radeon_crtc->rmx_type = RMX_OFF;
+			/* copy native mode */
 			memcpy(&radeon_crtc->native_mode,
 			       &radeon_encoder->native_mode,
 			       sizeof(struct drm_display_mode));
 			first = false;
 		} else {
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 7f50fb864af8..c5c45e626d74 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
+int radeon_new_pll = 1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
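
Note: new_pll defaults to enabled, and with 0444 permissions it is load-time only; booting with radeon.new_pll=0 on the kernel command line (or modprobe radeon new_pll=0) drops back to the old AVIVO divider code should the new search in radeon_compute_pll_avivo() pick bad dividers on some panel.
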
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 350962e0f346..e13785282a82 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
 #	define R600_IT_WAIT_REG_MEM		0x00003C00
 #	define R600_IT_MEM_WRITE		0x00003D00
 #	define R600_IT_INDIRECT_BUFFER		0x00003200
-#	define R600_IT_CP_INTERRUPT		0x00004000
 #	define R600_IT_SURFACE_SYNC		0x00004300
 #	define R600_CB0_DEST_BASE_ENA		(1 << 6)
 #	define R600_TC_ACTION_ENA		(1 << 23)
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d42bc512d75a..b4f23ec93201 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -35,6 +35,51 @@ extern int atom_debug;
 bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
 				struct drm_display_mode *mode);
 
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *clone_encoder;
+	uint32_t index_mask = 0;
+	int count;
+
+	/* DIG routing gets problematic */
+	if (rdev->family >= CHIP_R600)
+		return index_mask;
+	/* LVDS/TV are too wacky */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return index_mask;
+	/* DVO requires 2x ppll clocks depending on tmds chip */
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+		return index_mask;
+
+	count = -1;
+	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+		count++;
+
+		if (clone_encoder == encoder)
+			continue;
+		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		else
+			index_mask |= (1 << count);
+	}
+	return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->possible_clones = radeon_encoder_clones(encoder);
+	}
+}
+
 uint32_t
 radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
 {
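
Note: possible_clones is a bitmask indexed by an encoder's position in dev->mode_config.encoder_list, which is why count is advanced before any of the skip checks above; a set bit i means "may be active on the same CRTC at the same time as encoder i". Because the mask depends on the complete encoder list, radeon_setup_encoder_clones() is only called once connector/encoder parsing has finished (see the radeon_setup_enc_conn() hunk in radeon_display.c above). The shape of the computation, reduced to a sketch with a hypothetical exclusion test:

	/* sketch: build a clone mask over a list; bit i = list position i */
	uint32_t mask = 0;
	int idx = -1;
	list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
		idx++;
		if (other == encoder || clone_excluded(other))
			continue;
		mask |= 1u << idx;
	}
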
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
 	return NULL;
 }
 
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-
-	if (mode->hdisplay < native_mode->hdisplay ||
-	    mode->vdisplay < native_mode->vdisplay) {
-		int mode_id = adjusted_mode->base.id;
-		*adjusted_mode = *native_mode;
-		if (!ASIC_IS_AVIVO(rdev)) {
-			adjusted_mode->hdisplay = mode->hdisplay;
-			adjusted_mode->vdisplay = mode->vdisplay;
-		}
-		adjusted_mode->base.id = mode_id;
-	}
-}
-
-
 static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 	radeon_encoder_set_active_device(encoder);
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	if (radeon_encoder->rmx_type != RMX_OFF)
-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
 	/* hw bug */
 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+		int mode_id = adjusted_mode->base.id;
+		*adjusted_mode = *native_mode;
+		if (!ASIC_IS_AVIVO(rdev)) {
+			adjusted_mode->hdisplay = mode->hdisplay;
+			adjusted_mode->vdisplay = mode->vdisplay;
+		}
+		adjusted_mode->base.id = mode_id;
+	}
+
+	/* get the native mode for TV */
 	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
 		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
 		if (tv_dac) {
@@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 		}
 	}
 
+	if (ASIC_IS_DCE3(rdev) &&
+	    (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, mode);
+	}
+
 	return true;
 }
 
@@ -392,7 +430,7 @@ union lvds_encoder_control {
 	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
 };
 
-static void
+void
 atombios_digital_setup(struct drm_encoder *encoder, int action)
 {
 	struct drm_device *dev = encoder->dev;
@@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *radeon_dig_connector;
 
 	connector = radeon_get_connector_for_encoder(encoder);
 	if (!connector)
@@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 			return ATOM_ENCODER_MODE_LVDS;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
-		/*if (radeon_output->MonType == MT_DP)
-			return ATOM_ENCODER_MODE_DP;
-		else*/
-		if (drm_detect_hdmi_monitor(radeon_connector->edid))
+		radeon_dig_connector = radeon_connector->con_priv;
+		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return ATOM_ENCODER_MODE_DP;
+		else if (drm_detect_hdmi_monitor(radeon_connector->edid))
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
573 } 612 }
574} 613}
575 614
615/*
616 * DIG Encoder/Transmitter Setup
617 *
618 * DCE 3.0/3.1
619 * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
620 * Supports up to 3 digital outputs
621 * - 2 DIG encoder blocks.
622 * DIG1 can drive UNIPHY link A or link B
623 * DIG2 can drive UNIPHY link B or LVTMA
624 *
625 * DCE 3.2
626 * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
627 * Supports up to 5 digital outputs
628 * - 2 DIG encoder blocks.
629 * DIG1/2 can drive UNIPHY0/1/2 link A or link B
630 *
631 * Routing
632 * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
633 * Examples:
634 * crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
635 * crtc1 -> dig1 -> UNIPHY0 link B -> DP
636 * crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
637 * crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
638 */
576static void 639static void
577atombios_dig_encoder_setup(struct drm_encoder *encoder, int action) 640atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
578{ 641{
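
The selection rule that the routing comment above describes, and that the hunks below implement, condenses to a few lines. This is a minimal sketch with illustrative names; the only assumption is that linkb distinguishes UNIPHY link A from link B.

#include <stdbool.h>

enum dig_encoder { DIG1 = 1, DIG2 = 2 };

static enum dig_encoder pick_dig_encoder(bool drives_lvtma, bool linkb)
{
        if (drives_lvtma)
                return DIG2;            /* only DIG2 can drive LVTMA */
        /* for UNIPHY either encoder works; keying off the link keeps two
         * active outputs from landing on the same encoder */
        return linkb ? DIG2 : DIG1;
}
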
@@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
614 } else { 677 } else {
615 switch (radeon_encoder->encoder_id) { 678 switch (radeon_encoder->encoder_id) {
616 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 679 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
617 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl); 680 /* XXX doesn't really matter which dig encoder we pick as long as it's
681 * not already in use
682 */
683 if (dig_connector->linkb)
684 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
685 else
686 index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
618 num = 1; 687 num = 1;
619 break; 688 break;
620 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 689 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
690 /* Only dig2 encoder can drive LVTMA */
621 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl); 691 index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
622 num = 2; 692 num = 2;
623 break; 693 break;
@@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
652 } 722 }
653 } 723 }
654 724
655 if (radeon_encoder->pixel_clock > 165000) { 725 args.ucEncoderMode = atombios_get_encoder_mode(encoder);
656 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B; 726
727 if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
728 if (dig_connector->dp_clock == 270000)
729 args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
730 args.ucLaneNum = dig_connector->dp_lane_count;
731 } else if (radeon_encoder->pixel_clock > 165000)
657 args.ucLaneNum = 8; 732 args.ucLaneNum = 8;
658 } else { 733 else
659 if (dig_connector->linkb)
660 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
661 else
662 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
663 args.ucLaneNum = 4; 734 args.ucLaneNum = 4;
664 }
665 735
666 args.ucEncoderMode = atombios_get_encoder_mode(encoder); 736 if (dig_connector->linkb)
737 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
738 else
739 args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
667 740
668 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 741 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
669 742
@@ -674,8 +747,8 @@ union dig_transmitter_control {
674 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2; 747 DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
675}; 748};
676 749
677static void 750void
678atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action) 751atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
679{ 752{
680 struct drm_device *dev = encoder->dev; 753 struct drm_device *dev = encoder->dev;
681 struct radeon_device *rdev = dev->dev_private; 754 struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
687 struct drm_connector *connector; 760 struct drm_connector *connector;
688 struct radeon_connector *radeon_connector; 761 struct radeon_connector *radeon_connector;
689 struct radeon_connector_atom_dig *dig_connector; 762 struct radeon_connector_atom_dig *dig_connector;
763 bool is_dp = false;
690 764
691 connector = radeon_get_connector_for_encoder(encoder); 765 connector = radeon_get_connector_for_encoder(encoder);
692 if (!connector) 766 if (!connector)
@@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
704 778
705 dig_connector = radeon_connector->con_priv; 779 dig_connector = radeon_connector->con_priv;
706 780
781 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
782 is_dp = true;
783
707 memset(&args, 0, sizeof(args)); 784 memset(&args, 0, sizeof(args));
708 785
709 if (ASIC_IS_DCE32(rdev)) 786 if (ASIC_IS_DCE32(rdev))
@@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
724 args.v1.ucAction = action; 801 args.v1.ucAction = action;
725 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 802 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
726 args.v1.usInitInfo = radeon_connector->connector_object_id; 803 args.v1.usInitInfo = radeon_connector->connector_object_id;
804 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
805 args.v1.asMode.ucLaneSel = lane_num;
806 args.v1.asMode.ucLaneSet = lane_set;
727 } else { 807 } else {
728 if (radeon_encoder->pixel_clock > 165000) 808 if (is_dp)
809 args.v1.usPixelClock =
810 cpu_to_le16(dig_connector->dp_clock / 10);
811 else if (radeon_encoder->pixel_clock > 165000)
729 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10); 812 args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
730 else 813 else
731 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 814 args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
732 } 815 }
733 if (ASIC_IS_DCE32(rdev)) { 816 if (ASIC_IS_DCE32(rdev)) {
734 if (radeon_encoder->pixel_clock > 165000)
735 args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
736 if (dig->dig_block) 817 if (dig->dig_block)
737 args.v2.acConfig.ucEncoderSel = 1; 818 args.v2.acConfig.ucEncoderSel = 1;
819 if (dig_connector->linkb)
820 args.v2.acConfig.ucLinkSel = 1;
738 821
739 switch (radeon_encoder->encoder_id) { 822 switch (radeon_encoder->encoder_id) {
740 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 823 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
751 break; 834 break;
752 } 835 }
753 836
754 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 837 if (is_dp)
838 args.v2.acConfig.fCoherentMode = 1;
839 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
755 if (dig->coherent_mode) 840 if (dig->coherent_mode)
756 args.v2.acConfig.fCoherentMode = 1; 841 args.v2.acConfig.fCoherentMode = 1;
757 } 842 }
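
The usPixelClock selection above, restated standalone: the table field is in units of 10 kHz, for DP the transmitter runs at the link symbol clock rather than the dot clock, and on dual-link TMDS each link carries half the pixels. A sketch, not driver code.

static unsigned short transmitter_clock_10khz(int is_dp, int dp_clock_khz,
                                              int pixel_clock_khz)
{
        if (is_dp)
                return dp_clock_khz / 10;           /* 162/270 MHz symbol clock */
        if (pixel_clock_khz > 165000)
                return (pixel_clock_khz / 2) / 10;  /* per-link dual TMDS */
        return pixel_clock_khz / 10;
}
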
@@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
760 845
761 switch (radeon_encoder->encoder_id) { 846 switch (radeon_encoder->encoder_id) {
762 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 847 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
763 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 848 /* XXX doesn't really matter which dig encoder we pick as long as it's
849 * not already in use
850 */
851 if (dig_connector->linkb)
852 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
853 else
854 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
764 if (rdev->flags & RADEON_IS_IGP) { 855 if (rdev->flags & RADEON_IS_IGP) {
765 if (radeon_encoder->pixel_clock > 165000) { 856 if (radeon_encoder->pixel_clock > 165000) {
766 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
767 ATOM_TRANSMITTER_CONFIG_LINKA_B);
768 if (dig_connector->igp_lane_info & 0x3) 857 if (dig_connector->igp_lane_info & 0x3)
769 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7; 858 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
770 else if (dig_connector->igp_lane_info & 0xc) 859 else if (dig_connector->igp_lane_info & 0xc)
771 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15; 860 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
772 } else { 861 } else {
773 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
774 if (dig_connector->igp_lane_info & 0x1) 862 if (dig_connector->igp_lane_info & 0x1)
775 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3; 863 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
776 else if (dig_connector->igp_lane_info & 0x2) 864 else if (dig_connector->igp_lane_info & 0x2)
@@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
780 else if (dig_connector->igp_lane_info & 0x8) 868 else if (dig_connector->igp_lane_info & 0x8)
781 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15; 869 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
782 } 870 }
783 } else {
784 if (radeon_encoder->pixel_clock > 165000)
785 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
786 ATOM_TRANSMITTER_CONFIG_LINKA_B |
787 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
788 else {
789 if (dig_connector->linkb)
790 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
791 else
792 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
793 }
794 } 871 }
795 break; 872 break;
796 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 873 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
874 /* Only dig2 encoder can drive LVTMA */
797 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; 875 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
798 if (radeon_encoder->pixel_clock > 165000)
799 args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
800 ATOM_TRANSMITTER_CONFIG_LINKA_B |
801 ATOM_TRANSMITTER_CONFIG_LANE_0_7);
802 else {
803 if (dig_connector->linkb)
804 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
805 else
806 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
807 }
808 break; 876 break;
809 } 877 }
810 878
811 if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 879 if (radeon_encoder->pixel_clock > 165000)
880 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
881
882 if (dig_connector->linkb)
883 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
884 else
885 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
886
887 if (is_dp)
888 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
889 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
812 if (dig->coherent_mode) 890 if (dig->coherent_mode)
813 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT; 891 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
814 } 892 }
@@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
918 if (is_dig) { 996 if (is_dig) {
919 switch (mode) { 997 switch (mode) {
920 case DRM_MODE_DPMS_ON: 998 case DRM_MODE_DPMS_ON:
921 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); 999 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1000 {
1001 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1002 dp_link_train(encoder, connector);
1003 }
922 break; 1004 break;
923 case DRM_MODE_DPMS_STANDBY: 1005 case DRM_MODE_DPMS_STANDBY:
924 case DRM_MODE_DPMS_SUSPEND: 1006 case DRM_MODE_DPMS_SUSPEND:
925 case DRM_MODE_DPMS_OFF: 1007 case DRM_MODE_DPMS_OFF:
926 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); 1008 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
927 break; 1009 break;
928 } 1010 }
929 } else { 1011 } else {
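
The DPMS hook above establishes an ordering worth spelling out: the transmitter must be driving the pins before DP link training can exchange anything. A shape sketch with stub functions standing in for the driver calls.

static void enable_output(void) { /* ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT */ }
static void train_link(void)    { /* dp_link_train() over the AUX channel */ }

static void dig_dpms_on(void)
{
        enable_output();        /* power the transmitter up first */
        train_link();           /* then negotiate swing/pre-emphasis */
}
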
@@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
1025 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1107 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1026 else 1108 else
1027 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1109 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1028 } else 1110 } else {
1029 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID; 1111 struct drm_connector *connector;
1112 struct radeon_connector *radeon_connector;
1113 struct radeon_connector_atom_dig *dig_connector;
1114
1115 connector = radeon_get_connector_for_encoder(encoder);
1116 if (!connector)
1117 return;
1118 radeon_connector = to_radeon_connector(connector);
1119 if (!radeon_connector->con_priv)
1120 return;
1121 dig_connector = radeon_connector->con_priv;
1122
1123 /* XXX doesn't really matter which dig encoder we pick as long as it's
1124 * not already in use
1125 */
1126 if (dig_connector->linkb)
1127 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1128 else
1129 args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
1130 }
1030 break; 1131 break;
1031 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 1132 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
1032 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID; 1133 args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
1033 break; 1134 break;
1034 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1135 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1136 /* Only dig2 encoder can drive LVTMA */
1035 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; 1137 args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
1036 break; 1138 break;
1037 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 1139 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1104,11 +1206,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1104 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1206 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1105 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 1207 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
1106 1208
1107 if (radeon_encoder->enc_priv) { 1209 if (radeon_encoder->active_device &
1108 struct radeon_encoder_atom_dig *dig; 1210 (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
1211 if (radeon_encoder->enc_priv) {
1212 struct radeon_encoder_atom_dig *dig;
1109 1213
1110 dig = radeon_encoder->enc_priv; 1214 dig = radeon_encoder->enc_priv;
1111 dig->dig_block = radeon_crtc->crtc_id; 1215 dig->dig_block = radeon_crtc->crtc_id;
1216 }
1112 } 1217 }
1113 radeon_encoder->pixel_clock = adjusted_mode->clock; 1218 radeon_encoder->pixel_clock = adjusted_mode->clock;
1114 1219
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1134 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 1239 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
1135 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 1240 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
1136 /* disable the encoder and transmitter */ 1241 /* disable the encoder and transmitter */
1137 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE); 1242 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
1138 atombios_dig_encoder_setup(encoder, ATOM_DISABLE); 1243 atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
1139 1244
1140 /* setup and enable the encoder and transmitter */ 1245 /* setup and enable the encoder and transmitter */
1141 atombios_dig_encoder_setup(encoder, ATOM_ENABLE); 1246 atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
1142 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT); 1247 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
1143 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP); 1248 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
1144 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE); 1249 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1145 break; 1250 break;
1146 case ENCODER_OBJECT_ID_INTERNAL_DDI: 1251 case ENCODER_OBJECT_ID_INTERNAL_DDI:
1147 atombios_ddia_setup(encoder, ATOM_ENABLE); 1252 atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
1354 encoder->possible_crtcs = 0x1; 1459 encoder->possible_crtcs = 0x1;
1355 else 1460 else
1356 encoder->possible_crtcs = 0x3; 1461 encoder->possible_crtcs = 0x3;
1357 encoder->possible_clones = 0;
1358 1462
1359 radeon_encoder->enc_priv = NULL; 1463 radeon_encoder->enc_priv = NULL;
1360 1464
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d10eb43645c8..3ba213d1b06c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
140 struct radeon_framebuffer *rfb; 140 struct radeon_framebuffer *rfb;
141 struct drm_mode_fb_cmd mode_cmd; 141 struct drm_mode_fb_cmd mode_cmd;
142 struct drm_gem_object *gobj = NULL; 142 struct drm_gem_object *gobj = NULL;
143 struct radeon_object *robj = NULL; 143 struct radeon_bo *rbo = NULL;
144 struct device *device = &rdev->pdev->dev; 144 struct device *device = &rdev->pdev->dev;
145 int size, aligned_size, ret; 145 int size, aligned_size, ret;
146 u64 fb_gpuaddr; 146 u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
168 ret = radeon_gem_object_create(rdev, aligned_size, 0, 168 ret = radeon_gem_object_create(rdev, aligned_size, 0,
169 RADEON_GEM_DOMAIN_VRAM, 169 RADEON_GEM_DOMAIN_VRAM,
170 false, ttm_bo_type_kernel, 170 false, ttm_bo_type_kernel,
171 false, &gobj); 171 &gobj);
172 if (ret) { 172 if (ret) {
173 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", 173 printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
174 surface_width, surface_height); 174 surface_width, surface_height);
175 ret = -ENOMEM; 175 ret = -ENOMEM;
176 goto out; 176 goto out;
177 } 177 }
178 robj = gobj->driver_private; 178 rbo = gobj->driver_private;
179 179
180 if (fb_tiled) 180 if (fb_tiled)
181 tiling_flags = RADEON_TILING_MACRO; 181 tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
192 } 192 }
193#endif 193#endif
194 194
195 if (tiling_flags) 195 if (tiling_flags) {
196 radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); 196 ret = radeon_bo_set_tiling_flags(rbo,
197 tiling_flags | RADEON_TILING_SURFACE,
198 mode_cmd.pitch);
199 if (ret)
200 dev_err(rdev->dev, "FB failed to set tiling flags\n");
201 }
197 mutex_lock(&rdev->ddev->struct_mutex); 202 mutex_lock(&rdev->ddev->struct_mutex);
198 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); 203 fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
199 if (fb == NULL) { 204 if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
201 ret = -ENOMEM; 206 ret = -ENOMEM;
202 goto out_unref; 207 goto out_unref;
203 } 208 }
204 ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr); 209 ret = radeon_bo_reserve(rbo, false);
210 if (unlikely(ret != 0))
211 goto out_unref;
212 ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
213 if (ret) {
214 radeon_bo_unreserve(rbo);
215 goto out_unref;
216 }
217 if (fb_tiled)
218 radeon_bo_check_tiling(rbo, 0, 0);
219 ret = radeon_bo_kmap(rbo, &fbptr);
220 radeon_bo_unreserve(rbo);
205 if (ret) { 221 if (ret) {
206 printk(KERN_ERR "failed to pin framebuffer\n");
207 ret = -ENOMEM;
208 goto out_unref; 222 goto out_unref;
209 } 223 }
210 224
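
The new radeon_bo_* calls in this hunk follow a fixed reservation discipline that recurs throughout the patch: reserve, pin, kmap while still reserved, unreserve either way. Condensed into one helper below; the prototypes are declared locally, so this is a shape sketch, not code that links against the driver.

struct radeon_bo;
int  radeon_bo_reserve(struct radeon_bo *bo, int no_wait);
void radeon_bo_unreserve(struct radeon_bo *bo);
int  radeon_bo_pin(struct radeon_bo *bo, unsigned domain,
                   unsigned long long *gpu_addr);
void radeon_bo_unpin(struct radeon_bo *bo);
int  radeon_bo_kmap(struct radeon_bo *bo, void **ptr);

static int pin_and_map(struct radeon_bo *rbo, unsigned domain,
                       unsigned long long *gpu_addr, void **cpu_ptr)
{
        int r = radeon_bo_reserve(rbo, 0);        /* lock the BO */
        if (r)
                return r;
        r = radeon_bo_pin(rbo, domain, gpu_addr);
        if (r == 0) {
                r = radeon_bo_kmap(rbo, cpu_ptr); /* map while reserved */
                if (r)
                        radeon_bo_unpin(rbo);     /* undo pin on failure */
        }
        radeon_bo_unreserve(rbo);                 /* unlock either way */
        return r;
}
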
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
213 *fb_p = fb; 227 *fb_p = fb;
214 rfb = to_radeon_framebuffer(fb); 228 rfb = to_radeon_framebuffer(fb);
215 rdev->fbdev_rfb = rfb; 229 rdev->fbdev_rfb = rfb;
216 rdev->fbdev_robj = robj; 230 rdev->fbdev_rbo = rbo;
217 231
218 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device); 232 info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
219 if (info == NULL) { 233 if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
234 if (ret) 248 if (ret)
235 goto out_unref; 249 goto out_unref;
236 250
237 if (fb_tiled) 251 memset_io(fbptr, 0xff, aligned_size);
238 radeon_object_check_tiling(robj, 0, 0);
239
240 ret = radeon_object_kmap(robj, &fbptr);
241 if (ret) {
242 goto out_unref;
243 }
244
245 memset_io(fbptr, 0, aligned_size);
246 252
247 strcpy(info->fix.id, "radeondrmfb"); 253 strcpy(info->fix.id, "radeondrmfb");
248 254
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
288 return 0; 294 return 0;
289 295
290out_unref: 296out_unref:
291 if (robj) { 297 if (rbo) {
292 radeon_object_kunmap(robj); 298 ret = radeon_bo_reserve(rbo, false);
299 if (likely(ret == 0)) {
300 radeon_bo_kunmap(rbo);
301 radeon_bo_unreserve(rbo);
302 }
293 } 303 }
294 if (fb && ret) { 304 if (fb && ret) {
295 list_del(&fb->filp_head); 305 list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
321 331
322int radeonfb_probe(struct drm_device *dev) 332int radeonfb_probe(struct drm_device *dev)
323{ 333{
324 return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); 334 struct radeon_device *rdev = dev->dev_private;
335 int bpp_sel = 32;
336
 337 /* select 8 bpp console on RN50 or cards with 32MB or less VRAM */
338 if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
339 bpp_sel = 8;
340
341 return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
325} 342}
326 343
327int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 344int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
328{ 345{
329 struct fb_info *info; 346 struct fb_info *info;
330 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb); 347 struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
331 struct radeon_object *robj; 348 struct radeon_bo *rbo;
349 int r;
332 350
333 if (!fb) { 351 if (!fb) {
334 return -EINVAL; 352 return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
336 info = fb->fbdev; 354 info = fb->fbdev;
337 if (info) { 355 if (info) {
338 struct radeon_fb_device *rfbdev = info->par; 356 struct radeon_fb_device *rfbdev = info->par;
339 robj = rfb->obj->driver_private; 357 rbo = rfb->obj->driver_private;
340 unregister_framebuffer(info); 358 unregister_framebuffer(info);
341 radeon_object_kunmap(robj); 359 r = radeon_bo_reserve(rbo, false);
342 radeon_object_unpin(robj); 360 if (likely(r == 0)) {
361 radeon_bo_kunmap(rbo);
362 radeon_bo_unpin(rbo);
363 radeon_bo_unreserve(rbo);
364 }
343 drm_fb_helper_free(&rfbdev->helper); 365 drm_fb_helper_free(&rfbdev->helper);
344 framebuffer_release(info); 366 framebuffer_release(info);
345 } 367 }
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 3beb26d74719..cb4cd97ae39f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
168 return signaled; 168 return signaled;
169} 169}
170 170
171int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy)
172{
173 struct radeon_device *rdev;
174 int ret = 0;
175
176 rdev = fence->rdev;
177
178 __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
179
180 while (1) {
181 if (radeon_fence_signaled(fence))
182 break;
183
184 if (time_after_eq(jiffies, fence->timeout)) {
185 ret = -EBUSY;
186 break;
187 }
188
189 if (lazy)
190 schedule_timeout(1);
191
192 if (intr && signal_pending(current)) {
193 ret = -ERESTARTSYS;
194 break;
195 }
196 }
197 __set_current_state(TASK_RUNNING);
198 return ret;
199}
200
201
202int radeon_fence_wait(struct radeon_fence *fence, bool intr) 171int radeon_fence_wait(struct radeon_fence *fence, bool intr)
203{ 172{
204 struct radeon_device *rdev; 173 struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
216 return 0; 185 return 0;
217 } 186 }
218 187
219 if (rdev->family >= CHIP_R600) {
220 r = r600_fence_wait(fence, intr, 0);
221 if (r == -ERESTARTSYS)
222 return -EBUSY;
223 return r;
224 }
225
226retry: 188retry:
227 cur_jiffies = jiffies; 189 cur_jiffies = jiffies;
228 timeout = HZ / 100; 190 timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
231 } 193 }
232 194
233 if (intr) { 195 if (intr) {
196 radeon_irq_kms_sw_irq_get(rdev);
234 r = wait_event_interruptible_timeout(rdev->fence_drv.queue, 197 r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
235 radeon_fence_signaled(fence), timeout); 198 radeon_fence_signaled(fence), timeout);
236 if (unlikely(r == -ERESTARTSYS)) { 199 radeon_irq_kms_sw_irq_put(rdev);
237 return -EBUSY; 200 if (unlikely(r < 0))
238 } 201 return r;
239 } else { 202 } else {
203 radeon_irq_kms_sw_irq_get(rdev);
240 r = wait_event_timeout(rdev->fence_drv.queue, 204 r = wait_event_timeout(rdev->fence_drv.queue,
241 radeon_fence_signaled(fence), timeout); 205 radeon_fence_signaled(fence), timeout);
206 radeon_irq_kms_sw_irq_put(rdev);
242 } 207 }
243 if (unlikely(!radeon_fence_signaled(fence))) { 208 if (unlikely(!radeon_fence_signaled(fence))) {
244 if (unlikely(r == 0)) { 209 if (unlikely(r == 0)) {
diff --git a/drivers/gpu/drm/radeon/radeon_fixed.h b/drivers/gpu/drm/radeon/radeon_fixed.h
index 90187d173847..3d4d84e078ac 100644
--- a/drivers/gpu/drm/radeon/radeon_fixed.h
+++ b/drivers/gpu/drm/radeon/radeon_fixed.h
@@ -38,6 +38,23 @@ typedef union rfixed {
38#define fixed_init_half(A) { .full = rfixed_const_half((A)) } 38#define fixed_init_half(A) { .full = rfixed_const_half((A)) }
39#define rfixed_trunc(A) ((A).full >> 12) 39#define rfixed_trunc(A) ((A).full >> 12)
40 40
41static inline u32 rfixed_floor(fixed20_12 A)
42{
43 u32 non_frac = rfixed_trunc(A);
44
45 return rfixed_const(non_frac);
46}
47
48static inline u32 rfixed_ceil(fixed20_12 A)
49{
50 u32 non_frac = rfixed_trunc(A);
51
52 if (A.full > rfixed_const(non_frac))
53 return rfixed_const(non_frac + 1);
54 else
55 return rfixed_const(non_frac);
56}
57
41static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B) 58static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
42{ 59{
43 u64 tmp = ((u64)A.full << 13); 60 u64 tmp = ((u64)A.full << 13);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a68d7566178c..e73d56e83fa6 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
78 int r; 78 int r;
79 79
80 if (rdev->gart.table.vram.robj == NULL) { 80 if (rdev->gart.table.vram.robj == NULL) {
81 r = radeon_object_create(rdev, NULL, 81 r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
82 rdev->gart.table_size, 82 true, RADEON_GEM_DOMAIN_VRAM,
83 true, 83 &rdev->gart.table.vram.robj);
84 RADEON_GEM_DOMAIN_VRAM,
85 false, &rdev->gart.table.vram.robj);
86 if (r) { 84 if (r) {
87 return r; 85 return r;
88 } 86 }
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
95 uint64_t gpu_addr; 93 uint64_t gpu_addr;
96 int r; 94 int r;
97 95
98 r = radeon_object_pin(rdev->gart.table.vram.robj, 96 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
99 RADEON_GEM_DOMAIN_VRAM, &gpu_addr); 97 if (unlikely(r != 0))
100 if (r) {
101 radeon_object_unref(&rdev->gart.table.vram.robj);
102 return r; 98 return r;
103 } 99 r = radeon_bo_pin(rdev->gart.table.vram.robj,
104 r = radeon_object_kmap(rdev->gart.table.vram.robj, 100 RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
105 (void **)&rdev->gart.table.vram.ptr);
106 if (r) { 101 if (r) {
107 radeon_object_unpin(rdev->gart.table.vram.robj); 102 radeon_bo_unreserve(rdev->gart.table.vram.robj);
108 radeon_object_unref(&rdev->gart.table.vram.robj);
109 DRM_ERROR("radeon: failed to map gart vram table.\n");
110 return r; 103 return r;
111 } 104 }
105 r = radeon_bo_kmap(rdev->gart.table.vram.robj,
106 (void **)&rdev->gart.table.vram.ptr);
107 if (r)
108 radeon_bo_unpin(rdev->gart.table.vram.robj);
109 radeon_bo_unreserve(rdev->gart.table.vram.robj);
112 rdev->gart.table_addr = gpu_addr; 110 rdev->gart.table_addr = gpu_addr;
113 return 0; 111 return r;
114} 112}
115 113
116void radeon_gart_table_vram_free(struct radeon_device *rdev) 114void radeon_gart_table_vram_free(struct radeon_device *rdev)
117{ 115{
116 int r;
117
118 if (rdev->gart.table.vram.robj == NULL) { 118 if (rdev->gart.table.vram.robj == NULL) {
119 return; 119 return;
120 } 120 }
121 radeon_object_kunmap(rdev->gart.table.vram.robj); 121 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
122 radeon_object_unpin(rdev->gart.table.vram.robj); 122 if (likely(r == 0)) {
123 radeon_object_unref(&rdev->gart.table.vram.robj); 123 radeon_bo_kunmap(rdev->gart.table.vram.robj);
124 radeon_bo_unpin(rdev->gart.table.vram.robj);
125 radeon_bo_unreserve(rdev->gart.table.vram.robj);
126 }
127 radeon_bo_unref(&rdev->gart.table.vram.robj);
124} 128}
125 129
126 130
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index d880edf254db..2944486871b0 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
38 38
39void radeon_gem_object_free(struct drm_gem_object *gobj) 39void radeon_gem_object_free(struct drm_gem_object *gobj)
40{ 40{
41 struct radeon_object *robj = gobj->driver_private; 41 struct radeon_bo *robj = gobj->driver_private;
42 42
43 gobj->driver_private = NULL; 43 gobj->driver_private = NULL;
44 if (robj) { 44 if (robj) {
45 radeon_object_unref(&robj); 45 radeon_bo_unref(&robj);
46 } 46 }
47} 47}
48 48
49int radeon_gem_object_create(struct radeon_device *rdev, int size, 49int radeon_gem_object_create(struct radeon_device *rdev, int size,
50 int alignment, int initial_domain, 50 int alignment, int initial_domain,
51 bool discardable, bool kernel, 51 bool discardable, bool kernel,
52 bool interruptible, 52 struct drm_gem_object **obj)
53 struct drm_gem_object **obj)
54{ 53{
55 struct drm_gem_object *gobj; 54 struct drm_gem_object *gobj;
56 struct radeon_object *robj; 55 struct radeon_bo *robj;
57 int r; 56 int r;
58 57
59 *obj = NULL; 58 *obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
65 if (alignment < PAGE_SIZE) { 64 if (alignment < PAGE_SIZE) {
66 alignment = PAGE_SIZE; 65 alignment = PAGE_SIZE;
67 } 66 }
68 r = radeon_object_create(rdev, gobj, size, kernel, initial_domain, 67 r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
69 interruptible, &robj);
70 if (r) { 68 if (r) {
71 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n", 69 DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
72 size, initial_domain, alignment); 70 size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
83int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, 81int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
84 uint64_t *gpu_addr) 82 uint64_t *gpu_addr)
85{ 83{
86 struct radeon_object *robj = obj->driver_private; 84 struct radeon_bo *robj = obj->driver_private;
87 uint32_t flags; 85 int r;
88 86
89 switch (pin_domain) { 87 r = radeon_bo_reserve(robj, false);
90 case RADEON_GEM_DOMAIN_VRAM: 88 if (unlikely(r != 0))
91 flags = TTM_PL_FLAG_VRAM; 89 return r;
92 break; 90 r = radeon_bo_pin(robj, pin_domain, gpu_addr);
93 case RADEON_GEM_DOMAIN_GTT: 91 radeon_bo_unreserve(robj);
94 flags = TTM_PL_FLAG_TT; 92 return r;
95 break;
96 default:
97 flags = TTM_PL_FLAG_SYSTEM;
98 break;
99 }
100 return radeon_object_pin(robj, flags, gpu_addr);
101} 93}
102 94
103void radeon_gem_object_unpin(struct drm_gem_object *obj) 95void radeon_gem_object_unpin(struct drm_gem_object *obj)
104{ 96{
105 struct radeon_object *robj = obj->driver_private; 97 struct radeon_bo *robj = obj->driver_private;
106 radeon_object_unpin(robj); 98 int r;
99
100 r = radeon_bo_reserve(robj, false);
101 if (likely(r == 0)) {
102 radeon_bo_unpin(robj);
103 radeon_bo_unreserve(robj);
104 }
107} 105}
108 106
109int radeon_gem_set_domain(struct drm_gem_object *gobj, 107int radeon_gem_set_domain(struct drm_gem_object *gobj,
110 uint32_t rdomain, uint32_t wdomain) 108 uint32_t rdomain, uint32_t wdomain)
111{ 109{
112 struct radeon_object *robj; 110 struct radeon_bo *robj;
113 uint32_t domain; 111 uint32_t domain;
114 int r; 112 int r;
115 113
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
127 } 125 }
128 if (domain == RADEON_GEM_DOMAIN_CPU) { 126 if (domain == RADEON_GEM_DOMAIN_CPU) {
129 /* Asking for cpu access wait for object idle */ 127 /* Asking for cpu access wait for object idle */
130 r = radeon_object_wait(robj); 128 r = radeon_bo_wait(robj, NULL, false);
131 if (r) { 129 if (r) {
132 printk(KERN_ERR "Failed to wait for object !\n"); 130 printk(KERN_ERR "Failed to wait for object !\n");
133 return r; 131 return r;
134 } 132 }
133 radeon_hdp_flush(robj->rdev);
135 } 134 }
136 return 0; 135 return 0;
137} 136}
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
144 143
145void radeon_gem_fini(struct radeon_device *rdev) 144void radeon_gem_fini(struct radeon_device *rdev)
146{ 145{
147 radeon_object_force_delete(rdev); 146 radeon_bo_force_delete(rdev);
148} 147}
149 148
150 149
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
158 struct drm_radeon_gem_info *args = data; 157 struct drm_radeon_gem_info *args = data;
159 158
160 args->vram_size = rdev->mc.real_vram_size; 159 args->vram_size = rdev->mc.real_vram_size;
161 /* FIXME: report somethings that makes sense */ 160 args->vram_visible = rdev->mc.real_vram_size;
162 args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); 161 if (rdev->stollen_vga_memory)
163 args->gart_size = rdev->mc.gtt_size; 162 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
163 if (rdev->fbdev_rbo)
164 args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
165 args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
166 RADEON_IB_POOL_SIZE*64*1024;
164 return 0; 167 return 0;
165} 168}
166 169
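
A back-of-envelope check of the new gart_size accounting above, which subtracts the ring, a scratch page, and the IB pool from the raw GTT size. This assumes a 512 MiB GTT, a 1 MiB ring, and RADEON_IB_POOL_SIZE == 16; the pool size is an assumption for illustration, not taken from this diff.

#include <stdio.h>

int main(void)
{
        unsigned long gtt     = 512ul << 20;      /* mc.gtt_size */
        unsigned long ring    = 1ul << 20;        /* cp.ring_size */
        unsigned long scratch = 4096;             /* scratch/writeback page */
        unsigned long ib_pool = 16ul * 64 * 1024; /* 16 IBs of 64 KiB */

        printf("reported gart_size: %lu KiB\n",
               (gtt - ring - scratch - ib_pool) >> 10);
        return 0;
}
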
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
192 /* create a gem object to contain this object in */ 195 /* create a gem object to contain this object in */
193 args->size = roundup(args->size, PAGE_SIZE); 196 args->size = roundup(args->size, PAGE_SIZE);
194 r = radeon_gem_object_create(rdev, args->size, args->alignment, 197 r = radeon_gem_object_create(rdev, args->size, args->alignment,
195 args->initial_domain, false, 198 args->initial_domain, false,
196 false, true, &gobj); 199 false, &gobj);
197 if (r) { 200 if (r) {
198 return r; 201 return r;
199 } 202 }
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
218 * just validate the BO into a certain domain */ 221 * just validate the BO into a certain domain */
219 struct drm_radeon_gem_set_domain *args = data; 222 struct drm_radeon_gem_set_domain *args = data;
220 struct drm_gem_object *gobj; 223 struct drm_gem_object *gobj;
221 struct radeon_object *robj; 224 struct radeon_bo *robj;
222 int r; 225 int r;
223 226
224 /* for now if someone requests domain CPU - 227 /* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
244{ 247{
245 struct drm_radeon_gem_mmap *args = data; 248 struct drm_radeon_gem_mmap *args = data;
246 struct drm_gem_object *gobj; 249 struct drm_gem_object *gobj;
247 struct radeon_object *robj; 250 struct radeon_bo *robj;
248 int r;
249 251
250 gobj = drm_gem_object_lookup(dev, filp, args->handle); 252 gobj = drm_gem_object_lookup(dev, filp, args->handle);
251 if (gobj == NULL) { 253 if (gobj == NULL) {
252 return -EINVAL; 254 return -EINVAL;
253 } 255 }
254 robj = gobj->driver_private; 256 robj = gobj->driver_private;
255 r = radeon_object_mmap(robj, &args->addr_ptr); 257 args->addr_ptr = radeon_bo_mmap_offset(robj);
256 mutex_lock(&dev->struct_mutex); 258 mutex_lock(&dev->struct_mutex);
257 drm_gem_object_unreference(gobj); 259 drm_gem_object_unreference(gobj);
258 mutex_unlock(&dev->struct_mutex); 260 mutex_unlock(&dev->struct_mutex);
259 return r; 261 return 0;
260} 262}
261 263
262int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 264int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
264{ 266{
265 struct drm_radeon_gem_busy *args = data; 267 struct drm_radeon_gem_busy *args = data;
266 struct drm_gem_object *gobj; 268 struct drm_gem_object *gobj;
267 struct radeon_object *robj; 269 struct radeon_bo *robj;
268 int r; 270 int r;
269 uint32_t cur_placement; 271 uint32_t cur_placement = 0;
270 272
271 gobj = drm_gem_object_lookup(dev, filp, args->handle); 273 gobj = drm_gem_object_lookup(dev, filp, args->handle);
272 if (gobj == NULL) { 274 if (gobj == NULL) {
273 return -EINVAL; 275 return -EINVAL;
274 } 276 }
275 robj = gobj->driver_private; 277 robj = gobj->driver_private;
276 r = radeon_object_busy_domain(robj, &cur_placement); 278 r = radeon_bo_wait(robj, &cur_placement, true);
277 switch (cur_placement) { 279 switch (cur_placement) {
278 case TTM_PL_VRAM: 280 case TTM_PL_VRAM:
279 args->domain = RADEON_GEM_DOMAIN_VRAM; 281 args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
297{ 299{
298 struct drm_radeon_gem_wait_idle *args = data; 300 struct drm_radeon_gem_wait_idle *args = data;
299 struct drm_gem_object *gobj; 301 struct drm_gem_object *gobj;
300 struct radeon_object *robj; 302 struct radeon_bo *robj;
301 int r; 303 int r;
302 304
303 gobj = drm_gem_object_lookup(dev, filp, args->handle); 305 gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
305 return -EINVAL; 307 return -EINVAL;
306 } 308 }
307 robj = gobj->driver_private; 309 robj = gobj->driver_private;
308 r = radeon_object_wait(robj); 310 r = radeon_bo_wait(robj, NULL, false);
309 mutex_lock(&dev->struct_mutex); 311 mutex_lock(&dev->struct_mutex);
310 drm_gem_object_unreference(gobj); 312 drm_gem_object_unreference(gobj);
311 mutex_unlock(&dev->struct_mutex); 313 mutex_unlock(&dev->struct_mutex);
314 radeon_hdp_flush(robj->rdev);
312 return r; 315 return r;
313} 316}
314 317
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
317{ 320{
318 struct drm_radeon_gem_set_tiling *args = data; 321 struct drm_radeon_gem_set_tiling *args = data;
319 struct drm_gem_object *gobj; 322 struct drm_gem_object *gobj;
320 struct radeon_object *robj; 323 struct radeon_bo *robj;
321 int r = 0; 324 int r = 0;
322 325
323 DRM_DEBUG("%d \n", args->handle); 326 DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
325 if (gobj == NULL) 328 if (gobj == NULL)
326 return -EINVAL; 329 return -EINVAL;
327 robj = gobj->driver_private; 330 robj = gobj->driver_private;
328 radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch); 331 r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
329 mutex_lock(&dev->struct_mutex); 332 mutex_lock(&dev->struct_mutex);
330 drm_gem_object_unreference(gobj); 333 drm_gem_object_unreference(gobj);
331 mutex_unlock(&dev->struct_mutex); 334 mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
337{ 340{
338 struct drm_radeon_gem_get_tiling *args = data; 341 struct drm_radeon_gem_get_tiling *args = data;
339 struct drm_gem_object *gobj; 342 struct drm_gem_object *gobj;
340 struct radeon_object *robj; 343 struct radeon_bo *rbo;
341 int r = 0; 344 int r = 0;
342 345
343 DRM_DEBUG("\n"); 346 DRM_DEBUG("\n");
344 gobj = drm_gem_object_lookup(dev, filp, args->handle); 347 gobj = drm_gem_object_lookup(dev, filp, args->handle);
345 if (gobj == NULL) 348 if (gobj == NULL)
346 return -EINVAL; 349 return -EINVAL;
347 robj = gobj->driver_private; 350 rbo = gobj->driver_private;
348 radeon_object_get_tiling_flags(robj, &args->tiling_flags, 351 r = radeon_bo_reserve(rbo, false);
349 &args->pitch); 352 if (unlikely(r != 0))
353 return r;
354 radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
355 radeon_bo_unreserve(rbo);
350 mutex_lock(&dev->struct_mutex); 356 mutex_lock(&dev->struct_mutex);
351 drm_gem_object_unreference(gobj); 357 drm_gem_object_unreference(gobj);
352 mutex_unlock(&dev->struct_mutex); 358 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index dd438d32e5c0..da3da1e89d00 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
59} 59}
60 60
61 61
62void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state) 62void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
63{ 63{
64 struct radeon_device *rdev = radeon_connector->base.dev->dev_private; 64 struct radeon_device *rdev = i2c->dev->dev_private;
65 struct radeon_i2c_bus_rec *rec = &i2c->rec;
65 uint32_t temp; 66 uint32_t temp;
66 struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
67 67
68 /* RV410 appears to have a bug where the hw i2c in reset 68 /* RV410 appears to have a bug where the hw i2c in reset
69 * holds the i2c port in a bad state - switch hw i2c away before 69 * holds the i2c port in a bad state - switch hw i2c away before
70 * doing DDC - do this for all r200s/r300s/r400s for safety sake 70 * doing DDC - do this for all r200s/r300s/r400s for safety sake
71 */ 71 */
72 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) { 72 if (rec->hw_capable) {
73 if (rec->a_clk_reg == RADEON_GPIO_MONID) { 73 if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
74 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 74 if (rec->a_clk_reg == RADEON_GPIO_MONID) {
75 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1))); 75 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
76 } else { 76 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
77 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST | 77 } else {
78 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3))); 78 WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
79 R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
80 }
79 } 81 }
80 } 82 }
81 if (lock_state) {
82 temp = RREG32(rec->a_clk_reg);
83 temp &= ~(rec->a_clk_mask);
84 WREG32(rec->a_clk_reg, temp);
85
86 temp = RREG32(rec->a_data_reg);
87 temp &= ~(rec->a_data_mask);
88 WREG32(rec->a_data_reg, temp);
89 }
90 83
84 /* clear the output pin values */
85 temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
86 WREG32(rec->a_clk_reg, temp);
87
88 temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
89 WREG32(rec->a_data_reg, temp);
90
91 /* set the pins to input */
92 temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
93 WREG32(rec->en_clk_reg, temp);
94
95 temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
96 WREG32(rec->en_data_reg, temp);
97
98 /* mask the gpio pins for software use */
91 temp = RREG32(rec->mask_clk_reg); 99 temp = RREG32(rec->mask_clk_reg);
92 if (lock_state) 100 if (lock_state)
93 temp |= rec->mask_clk_mask; 101 temp |= rec->mask_clk_mask;
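
The rework above splits the GPIO bus record into output-value ('a'), direction ('en'), and input ('y') registers, which is the standard open-drain i2c convention: pre-load the output latch with 0, then toggle only the direction bit. A standalone sketch of that convention; the register variables are stand-ins for MMIO.

#include <stdbool.h>

static unsigned int en_reg, y_reg;      /* stand-ins for en_clk/y_clk MMIO */

static void scl_set(bool high)
{
        /* releasing the pin (input) lets the bus pull-up float it high;
         * enabling the output drives the pre-loaded 0, pulling it low */
        if (high)
                en_reg &= ~1u;          /* tristate: line floats high */
        else
                en_reg |= 1u;           /* drive the latched 0: line low */
}

static bool scl_get(void)
{
        return y_reg & 1u;              /* sample the actual pin state */
}
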
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
112 struct radeon_i2c_bus_rec *rec = &i2c->rec; 120 struct radeon_i2c_bus_rec *rec = &i2c->rec;
113 uint32_t val; 121 uint32_t val;
114 122
115 val = RREG32(rec->get_clk_reg); 123 /* read the value off the pin */
116 val &= rec->get_clk_mask; 124 val = RREG32(rec->y_clk_reg);
125 val &= rec->y_clk_mask;
117 126
118 return (val != 0); 127 return (val != 0);
119} 128}
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
126 struct radeon_i2c_bus_rec *rec = &i2c->rec; 135 struct radeon_i2c_bus_rec *rec = &i2c->rec;
127 uint32_t val; 136 uint32_t val;
128 137
129 val = RREG32(rec->get_data_reg); 138 /* read the value off the pin */
130 val &= rec->get_data_mask; 139 val = RREG32(rec->y_data_reg);
140 val &= rec->y_data_mask;
141
131 return (val != 0); 142 return (val != 0);
132} 143}
133 144
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
138 struct radeon_i2c_bus_rec *rec = &i2c->rec; 149 struct radeon_i2c_bus_rec *rec = &i2c->rec;
139 uint32_t val; 150 uint32_t val;
140 151
141 val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask); 152 /* set pin direction */
142 val |= clock ? 0 : rec->put_clk_mask; 153 val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
143 WREG32(rec->put_clk_reg, val); 154 val |= clock ? 0 : rec->en_clk_mask;
155 WREG32(rec->en_clk_reg, val);
144} 156}
145 157
146static void set_data(void *i2c_priv, int data) 158static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
150 struct radeon_i2c_bus_rec *rec = &i2c->rec; 162 struct radeon_i2c_bus_rec *rec = &i2c->rec;
151 uint32_t val; 163 uint32_t val;
152 164
153 val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask); 165 /* set pin direction */
154 val |= data ? 0 : rec->put_data_mask; 166 val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
155 WREG32(rec->put_data_reg, val); 167 val |= data ? 0 : rec->en_data_mask;
168 WREG32(rec->en_data_reg, val);
156} 169}
157 170
158struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 171struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
159 struct radeon_i2c_bus_rec *rec, 172 struct radeon_i2c_bus_rec *rec,
160 const char *name) 173 const char *name)
161{ 174{
162 struct radeon_i2c_chan *i2c; 175 struct radeon_i2c_chan *i2c;
163 int ret; 176 int ret;
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
167 return NULL; 180 return NULL;
168 181
169 i2c->adapter.owner = THIS_MODULE; 182 i2c->adapter.owner = THIS_MODULE;
170 i2c->adapter.algo_data = &i2c->algo;
171 i2c->dev = dev; 183 i2c->dev = dev;
172 i2c->algo.setsda = set_data; 184 i2c_set_adapdata(&i2c->adapter, i2c);
173 i2c->algo.setscl = set_clock; 185 i2c->adapter.algo_data = &i2c->algo.bit;
174 i2c->algo.getsda = get_data; 186 i2c->algo.bit.setsda = set_data;
175 i2c->algo.getscl = get_clock; 187 i2c->algo.bit.setscl = set_clock;
176 i2c->algo.udelay = 20; 188 i2c->algo.bit.getsda = get_data;
189 i2c->algo.bit.getscl = get_clock;
190 i2c->algo.bit.udelay = 20;
177 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always 191 /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
178 * make this, 2 jiffies is a lot more reliable */ 192 * make this, 2 jiffies is a lot more reliable */
179 i2c->algo.timeout = 2; 193 i2c->algo.bit.timeout = 2;
180 i2c->algo.data = i2c; 194 i2c->algo.bit.data = i2c;
181 i2c->rec = *rec; 195 i2c->rec = *rec;
182 i2c_set_adapdata(&i2c->adapter, i2c);
183
184 ret = i2c_bit_add_bus(&i2c->adapter); 196 ret = i2c_bit_add_bus(&i2c->adapter);
185 if (ret) { 197 if (ret) {
186 DRM_INFO("Failed to register i2c %s\n", name); 198 DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@ out_free:
194 206
195} 207}
196 208
209struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
210 struct radeon_i2c_bus_rec *rec,
211 const char *name)
212{
213 struct radeon_i2c_chan *i2c;
214 int ret;
215
216 i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
217 if (i2c == NULL)
218 return NULL;
219
220 i2c->rec = *rec;
221 i2c->adapter.owner = THIS_MODULE;
222 i2c->dev = dev;
223 i2c_set_adapdata(&i2c->adapter, i2c);
224 i2c->adapter.algo_data = &i2c->algo.dp;
225 i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
226 i2c->algo.dp.address = 0;
227 ret = i2c_dp_aux_add_bus(&i2c->adapter);
228 if (ret) {
229 DRM_INFO("Failed to register i2c %s\n", name);
230 goto out_free;
231 }
232
233 return i2c;
234out_free:
235 kfree(i2c);
236 return NULL;
237
238}
239
240
197void radeon_i2c_destroy(struct radeon_i2c_chan *i2c) 241void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
198{ 242{
199 if (!i2c) 243 if (!i2c)
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
207{ 251{
208 return NULL; 252 return NULL;
209} 253}
254
255void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
256 u8 slave_addr,
257 u8 addr,
258 u8 *val)
259{
260 u8 out_buf[2];
261 u8 in_buf[2];
262 struct i2c_msg msgs[] = {
263 {
264 .addr = slave_addr,
265 .flags = 0,
266 .len = 1,
267 .buf = out_buf,
268 },
269 {
270 .addr = slave_addr,
271 .flags = I2C_M_RD,
272 .len = 1,
273 .buf = in_buf,
274 }
275 };
276
277 out_buf[0] = addr;
278 out_buf[1] = 0;
279
280 if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
281 *val = in_buf[0];
282 DRM_DEBUG("val = 0x%02x\n", *val);
283 } else {
284 DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
285 addr, *val);
286 }
287}
288
289void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
290 u8 slave_addr,
291 u8 addr,
292 u8 val)
293{
294 uint8_t out_buf[2];
295 struct i2c_msg msg = {
296 .addr = slave_addr,
297 .flags = 0,
298 .len = 2,
299 .buf = out_buf,
300 };
301
302 out_buf[0] = addr;
303 out_buf[1] = val;
304
305 if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
306 DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
307 addr, val);
308}
309
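
A hypothetical call site for the two helpers above, writing a register on an 8-bit-addressed slave and reading it back; the 0x50 slave address and 0x04 register offset are made-up example values, and the prototypes are declared locally for the sketch.

struct radeon_i2c_chan;
void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c, unsigned char slave,
                            unsigned char addr, unsigned char val);
void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c, unsigned char slave,
                            unsigned char addr, unsigned char *val);

static void poke_and_verify(struct radeon_i2c_chan *bus)
{
        unsigned char val = 0;

        radeon_i2c_sw_put_byte(bus, 0x50, 0x04, 0xaa); /* write reg 0x04 */
        radeon_i2c_sw_get_byte(bus, 0x50, 0x04, &val); /* read it back */
}
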
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index a0fe6232dcb6..9223296fe37b 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
39 return radeon_irq_process(rdev); 39 return radeon_irq_process(rdev);
40} 40}
41 41
42/*
43 * Handle hotplug events outside the interrupt handler proper.
44 */
45static void radeon_hotplug_work_func(struct work_struct *work)
46{
47 struct radeon_device *rdev = container_of(work, struct radeon_device,
48 hotplug_work);
49 struct drm_device *dev = rdev->ddev;
50 struct drm_mode_config *mode_config = &dev->mode_config;
51 struct drm_connector *connector;
52
53 if (mode_config->num_connector) {
54 list_for_each_entry(connector, &mode_config->connector_list, head)
55 radeon_connector_hotplug(connector);
56 }
57 /* Just fire off a uevent and let userspace tell us what to do */
58 drm_sysfs_hotplug_event(dev);
59}
60
42void radeon_driver_irq_preinstall_kms(struct drm_device *dev) 61void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
43{ 62{
44 struct radeon_device *rdev = dev->dev_private; 63 struct radeon_device *rdev = dev->dev_private;
45 unsigned i; 64 unsigned i;
46 65
66 INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
67
47 /* Disable *all* interrupts */ 68 /* Disable *all* interrupts */
48 rdev->irq.sw_int = false; 69 rdev->irq.sw_int = false;
49 for (i = 0; i < 2; i++) { 70 for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
87 108
88 if (rdev->flags & RADEON_SINGLE_CRTC) 109 if (rdev->flags & RADEON_SINGLE_CRTC)
89 num_crtc = 1; 110 num_crtc = 1;
90 111 spin_lock_init(&rdev->irq.sw_lock);
91 r = drm_vblank_init(rdev->ddev, num_crtc); 112 r = drm_vblank_init(rdev->ddev, num_crtc);
92 if (r) { 113 if (r) {
93 return r; 114 return r;
94 } 115 }
95 /* enable msi */ 116 /* enable msi */
96 rdev->msi_enabled = 0; 117 rdev->msi_enabled = 0;
97 if (rdev->family >= CHIP_RV380) { 118 /* MSIs don't seem to work on my rs780;
119 * not sure about rs880 or other rs780s.
120 * Needs more investigation.
121 */
122 if ((rdev->family >= CHIP_RV380) &&
123 (rdev->family != CHIP_RS780) &&
124 (rdev->family != CHIP_RS880)) {
98 int ret = pci_enable_msi(rdev->pdev); 125 int ret = pci_enable_msi(rdev->pdev);
99 if (!ret) 126 if (!ret) {
100 rdev->msi_enabled = 1; 127 rdev->msi_enabled = 1;
128 DRM_INFO("radeon: using MSI.\n");
129 }
101 } 130 }
102 drm_irq_install(rdev->ddev); 131 drm_irq_install(rdev->ddev);
103 rdev->irq.installed = true; 132 rdev->irq.installed = true;
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
114 pci_disable_msi(rdev->pdev); 143 pci_disable_msi(rdev->pdev);
115 } 144 }
116} 145}
146
147void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
148{
149 unsigned long irqflags;
150
151 spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
152 if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
153 rdev->irq.sw_int = true;
154 radeon_irq_set(rdev);
155 }
156 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
157}
158
159void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
160{
161 unsigned long irqflags;
162
163 spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
164 BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
165 if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
166 rdev->irq.sw_int = false;
167 radeon_irq_set(rdev);
168 }
169 spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
170}
171
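
How the pair above is meant to be used: gets and puts nest under the refcount, so two concurrent fence waiters keep the sw interrupt enabled until the last one drops its reference. An illustrative fragment, not driver code.

radeon_irq_kms_sw_irq_get(rdev);   /* refcount 0 -> 1: enables the irq   */
radeon_irq_kms_sw_irq_get(rdev);   /* refcount 1 -> 2: no hardware touch */
radeon_irq_kms_sw_irq_put(rdev);   /* refcount 2 -> 1: still enabled     */
radeon_irq_kms_sw_irq_put(rdev);   /* refcount 1 -> 0: disables the irq  */
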
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ba128621057a..f23b05606eb5 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,10 +30,19 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "radeon_drm.h" 31#include "radeon_drm.h"
32 32
33int radeon_driver_unload_kms(struct drm_device *dev)
34{
35 struct radeon_device *rdev = dev->dev_private;
36
37 if (rdev == NULL)
38 return 0;
39 radeon_modeset_fini(rdev);
40 radeon_device_fini(rdev);
41 kfree(rdev);
42 dev->dev_private = NULL;
43 return 0;
44}
33 45
34/*
35 * Driver load/unload
36 */
37int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) 46int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
38{ 47{
39 struct radeon_device *rdev; 48 struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
62 */ 71 */
63 r = radeon_device_init(rdev, dev, dev->pdev, flags); 72 r = radeon_device_init(rdev, dev, dev->pdev, flags);
64 if (r) { 73 if (r) {
65 DRM_ERROR("Fatal error while trying to initialize radeon.\n"); 74 dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
66 return r; 75 goto out;
67 } 76 }
68 /* Again modeset_init should fail only on fatal error 77 /* Again modeset_init should fail only on fatal error
69 * otherwise it should provide enough functionalities 78 * otherwise it should provide enough functionalities
70 * for shadowfb to run 79 * for shadowfb to run
71 */ 80 */
72 r = radeon_modeset_init(rdev); 81 r = radeon_modeset_init(rdev);
73 if (r) { 82 if (r)
74 return r; 83 dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
75 } 84out:
76 return 0; 85 if (r)
77} 86 radeon_driver_unload_kms(dev);
78 87 return r;
79int radeon_driver_unload_kms(struct drm_device *dev)
80{
81 struct radeon_device *rdev = dev->dev_private;
82
83 if (rdev == NULL)
84 return 0;
85 radeon_modeset_fini(rdev);
86 radeon_device_fini(rdev);
87 kfree(rdev);
88 dev->dev_private = NULL;
89 return 0;
90} 88}
91 89
92 90
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 8d0b7aa87fa4..b82ede98e152 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -30,6 +30,18 @@
30#include "radeon.h" 30#include "radeon.h"
31#include "atom.h" 31#include "atom.h"
32 32
33static void radeon_overscan_setup(struct drm_crtc *crtc,
34 struct drm_display_mode *mode)
35{
36 struct drm_device *dev = crtc->dev;
37 struct radeon_device *rdev = dev->dev_private;
38 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
39
40 WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
41 WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
42 WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
43}
44
33static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, 45static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
34 struct drm_display_mode *mode, 46 struct drm_display_mode *mode,
35 struct drm_display_mode *adjusted_mode) 47 struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
292 uint32_t mask; 304 uint32_t mask;
293 305
294 if (radeon_crtc->crtc_id) 306 if (radeon_crtc->crtc_id)
295 mask = (RADEON_CRTC2_EN | 307 mask = (RADEON_CRTC2_DISP_DIS |
296 RADEON_CRTC2_DISP_DIS |
297 RADEON_CRTC2_VSYNC_DIS | 308 RADEON_CRTC2_VSYNC_DIS |
298 RADEON_CRTC2_HSYNC_DIS | 309 RADEON_CRTC2_HSYNC_DIS |
299 RADEON_CRTC2_DISP_REQ_EN_B); 310 RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
305 switch (mode) { 316 switch (mode) {
306 case DRM_MODE_DPMS_ON: 317 case DRM_MODE_DPMS_ON:
307 if (radeon_crtc->crtc_id) 318 if (radeon_crtc->crtc_id)
308 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask); 319 WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
309 else { 320 else {
310 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN | 321 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
311 RADEON_CRTC_DISP_REQ_EN_B)); 322 RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
319 case DRM_MODE_DPMS_OFF: 330 case DRM_MODE_DPMS_OFF:
320 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); 331 drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
321 if (radeon_crtc->crtc_id) 332 if (radeon_crtc->crtc_id)
322 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); 333 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
323 else { 334 else {
324 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN | 335 WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
325 RADEON_CRTC_DISP_REQ_EN_B)); 336 RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
400 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 411 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
401 struct radeon_framebuffer *radeon_fb; 412 struct radeon_framebuffer *radeon_fb;
402 struct drm_gem_object *obj; 413 struct drm_gem_object *obj;
414 struct radeon_bo *rbo;
403 uint64_t base; 415 uint64_t base;
404 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; 416 uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
405 uint32_t crtc_pitch, pitch_pixels; 417 uint32_t crtc_pitch, pitch_pixels;
406 uint32_t tiling_flags; 418 uint32_t tiling_flags;
407 int format; 419 int format;
408 uint32_t gen_cntl_reg, gen_cntl_val; 420 uint32_t gen_cntl_reg, gen_cntl_val;
421 int r;
409 422
410 DRM_DEBUG("\n"); 423 DRM_DEBUG("\n");
424 /* no fb bound */
425 if (!crtc->fb) {
426 DRM_DEBUG("No FB bound\n");
427 return 0;
428 }
411 429
412 radeon_fb = to_radeon_framebuffer(crtc->fb); 430 radeon_fb = to_radeon_framebuffer(crtc->fb);
413 431
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
431 return false; 449 return false;
432 } 450 }
433 451
 452 /* Pin framebuffer & get tiling information */
434 obj = radeon_fb->obj; 453 obj = radeon_fb->obj;
435 if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { 454 rbo = obj->driver_private;
455 r = radeon_bo_reserve(rbo, false);
456 if (unlikely(r != 0))
457 return r;
458 r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
459 if (unlikely(r != 0)) {
460 radeon_bo_unreserve(rbo);
436 return -EINVAL; 461 return -EINVAL;
437 } 462 }
463 radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
464 radeon_bo_unreserve(rbo);
465 if (tiling_flags & RADEON_TILING_MICRO)
466 DRM_ERROR("trying to scanout microtiled buffer\n");
467
438 /* if scanout was in GTT this really wouldn't work */ 468 /* if scanout was in GTT this really wouldn't work */
439 /* crtc offset is from display base addr not FB location */ 469 /* crtc offset is from display base addr not FB location */
440 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; 470 radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
449 (crtc->fb->bits_per_pixel * 8)); 479 (crtc->fb->bits_per_pixel * 8));
450 crtc_pitch |= crtc_pitch << 16; 480 crtc_pitch |= crtc_pitch << 16;
451 481
452 radeon_object_get_tiling_flags(obj->driver_private,
453 &tiling_flags, NULL);
454 if (tiling_flags & RADEON_TILING_MICRO)
455 DRM_ERROR("trying to scanout microtiled buffer\n");
456 482
457 if (tiling_flags & RADEON_TILING_MACRO) { 483 if (tiling_flags & RADEON_TILING_MACRO) {
458 if (ASIC_IS_R300(rdev)) 484 if (ASIC_IS_R300(rdev))
@@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
530 556
531 if (old_fb && old_fb != crtc->fb) { 557 if (old_fb && old_fb != crtc->fb) {
532 radeon_fb = to_radeon_framebuffer(old_fb); 558 radeon_fb = to_radeon_framebuffer(old_fb);
533 radeon_gem_object_unpin(radeon_fb->obj); 559 rbo = radeon_fb->obj->driver_private;
560 r = radeon_bo_reserve(rbo, false);
561 if (unlikely(r != 0))
562 return r;
563 radeon_bo_unpin(rbo);
564 radeon_bo_unreserve(rbo);
534 } 565 }
535 566
536 /* Bytes per pixel may have changed */ 567 /* Bytes per pixel may have changed */
@@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
642 uint32_t crtc2_gen_cntl; 673 uint32_t crtc2_gen_cntl;
643 uint32_t disp2_merge_cntl; 674 uint32_t disp2_merge_cntl;
644 675
 645 /* check to see if TV DAC is enabled for another crtc and keep it enabled */ 676 /* if TV DAC is enabled for another crtc, keep it enabled */
646 if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON) 677 crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
647 crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
648 else
649 crtc2_gen_cntl = 0;
650
651 crtc2_gen_cntl |= ((format << 8) 678 crtc2_gen_cntl |= ((format << 8)
652 | RADEON_CRTC2_VSYNC_DIS 679 | RADEON_CRTC2_VSYNC_DIS
653 | RADEON_CRTC2_HSYNC_DIS 680 | RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
676 uint32_t crtc_ext_cntl; 703 uint32_t crtc_ext_cntl;
677 uint32_t disp_merge_cntl; 704 uint32_t disp_merge_cntl;
678 705
679 crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN 706 crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
707 crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
680 | (format << 8) 708 | (format << 8)
681 | RADEON_CRTC_DISP_REQ_EN_B 709 | RADEON_CRTC_DISP_REQ_EN_B
682 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN) 710 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
779 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 807 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
780 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; 808 pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
781 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { 809 if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
782 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 810 if (!rdev->is_atom_bios) {
783 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; 811 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
784 if (lvds) { 812 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
785 if (lvds->use_bios_dividers) { 813 if (lvds) {
786 pll_ref_div = lvds->panel_ref_divider; 814 if (lvds->use_bios_dividers) {
787 pll_fb_post_div = (lvds->panel_fb_divider | 815 pll_ref_div = lvds->panel_ref_divider;
788 (lvds->panel_post_divider << 16)); 816 pll_fb_post_div = (lvds->panel_fb_divider |
789 htotal_cntl = 0; 817 (lvds->panel_post_divider << 16));
790 use_bios_divs = true; 818 htotal_cntl = 0;
819 use_bios_divs = true;
820 }
791 } 821 }
792 } 822 }
793 pll_flags |= RADEON_PLL_USE_REF_DIV; 823 pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1027 radeon_crtc_set_base(crtc, x, y, old_fb); 1057 radeon_crtc_set_base(crtc, x, y, old_fb);
1028 radeon_set_crtc_timing(crtc, adjusted_mode); 1058 radeon_set_crtc_timing(crtc, adjusted_mode);
1029 radeon_set_pll(crtc, adjusted_mode); 1059 radeon_set_pll(crtc, adjusted_mode);
1060 radeon_overscan_setup(crtc, adjusted_mode);
1030 if (radeon_crtc->crtc_id == 0) { 1061 if (radeon_crtc->crtc_id == 0) {
1031 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); 1062 radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
1032 } else { 1063 } else {
@@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1042 1073
1043static void radeon_crtc_prepare(struct drm_crtc *crtc) 1074static void radeon_crtc_prepare(struct drm_crtc *crtc)
1044{ 1075{
1045 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 1076 struct drm_device *dev = crtc->dev;
1077 struct drm_crtc *crtci;
1078
1079 /*
1080 * The hardware wedges sometimes if you reconfigure one CRTC
1081 * whilst another is running (see fdo bug #24611).
1082 */
1083 list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
1084 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
1046} 1085}
1047 1086
1048static void radeon_crtc_commit(struct drm_crtc *crtc) 1087static void radeon_crtc_commit(struct drm_crtc *crtc)
1049{ 1088{
1050 radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 1089 struct drm_device *dev = crtc->dev;
1090 struct drm_crtc *crtci;
1091
1092 /*
1093 * Reenable the CRTCs that should be running.
1094 */
1095 list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
1096 if (crtci->enabled)
1097 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
1098 }
1051} 1099}
1052 1100
1053static const struct drm_crtc_helper_funcs legacy_helper_funcs = { 1101static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
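Two patterns recur in the radeon_legacy_crtc.c hunks above: radeon_crtc_set_base() now pins the framebuffer through an explicit reserve / pin / query-tiling / unreserve sequence (dropping the reservation on the pin failure path), and prepare/commit walk the whole CRTC list so no CRTC is reconfigured while another is live. The sketch below shows just the reservation discipline, using hypothetical demo_bo_* stubs in place of the real radeon_bo_* API.

#include <stdio.h>

/* hypothetical stand-ins for the radeon_bo_* API used by the new set_base path */
struct demo_bo { int reserved; int pinned; };

static int demo_bo_reserve(struct demo_bo *bo)    { bo->reserved = 1; return 0; }
static void demo_bo_unreserve(struct demo_bo *bo) { bo->reserved = 0; }
static int demo_bo_pin(struct demo_bo *bo, unsigned long *gpu_addr)
{
	bo->pinned = 1;
	*gpu_addr = 0x1000;		/* pretend VRAM offset */
	return 0;
}

static int demo_set_base(struct demo_bo *bo)
{
	unsigned long base;
	int r;

	r = demo_bo_reserve(bo);	/* all bo state is touched under reservation */
	if (r != 0)
		return r;
	r = demo_bo_pin(bo, &base);
	if (r != 0) {
		demo_bo_unreserve(bo);	/* drop the reservation on the error path too */
		return r;
	}
	/* ... query tiling flags while still reserved ... */
	demo_bo_unreserve(bo);
	printf("scanout base 0x%lx\n", base);
	return 0;
}

int main(void)
{
	struct demo_bo bo = { 0, 0 };
	return demo_set_base(&bo);
}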
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 00382122869b..df00515e81fa 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
136 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; 136 lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
137 137
138 lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL); 138 lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
139 if ((!rdev->is_atom_bios)) { 139 if (rdev->is_atom_bios) {
140 /* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl
 141 * so we need to call that on resume to set up the reg properly.
142 */
143 radeon_encoder->pixel_clock = adjusted_mode->clock;
144 atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
145 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
146 } else {
140 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv; 147 struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
141 if (lvds) { 148 if (lvds) {
142 DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl); 149 DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
147 (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT)); 154 (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
148 } else 155 } else
149 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL); 156 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
150 } else 157 }
151 lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
152 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS; 158 lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
153 lvds_gen_cntl &= ~(RADEON_LVDS_ON | 159 lvds_gen_cntl &= ~(RADEON_LVDS_ON |
154 RADEON_LVDS_BLON | 160 RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
184 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); 190 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
185} 191}
186 192
187static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder, 193static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
188 struct drm_display_mode *mode, 194 struct drm_display_mode *mode,
189 struct drm_display_mode *adjusted_mode) 195 struct drm_display_mode *adjusted_mode)
190{ 196{
191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 197 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 198
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
194 radeon_encoder_set_active_device(encoder); 200 radeon_encoder_set_active_device(encoder);
195 drm_mode_set_crtcinfo(adjusted_mode, 0); 201 drm_mode_set_crtcinfo(adjusted_mode, 0);
196 202
197 if (radeon_encoder->rmx_type != RMX_OFF) 203 /* get the native mode for LVDS */
198 radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); 204 if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
205 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
206 int mode_id = adjusted_mode->base.id;
207 *adjusted_mode = *native_mode;
208 adjusted_mode->hdisplay = mode->hdisplay;
209 adjusted_mode->vdisplay = mode->vdisplay;
210 adjusted_mode->base.id = mode_id;
211 }
199 212
200 return true; 213 return true;
201} 214}
202 215
203static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = { 216static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
204 .dpms = radeon_legacy_lvds_dpms, 217 .dpms = radeon_legacy_lvds_dpms,
205 .mode_fixup = radeon_legacy_lvds_mode_fixup, 218 .mode_fixup = radeon_legacy_mode_fixup,
206 .prepare = radeon_legacy_lvds_prepare, 219 .prepare = radeon_legacy_lvds_prepare,
207 .mode_set = radeon_legacy_lvds_mode_set, 220 .mode_set = radeon_legacy_lvds_mode_set,
208 .commit = radeon_legacy_lvds_commit, 221 .commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
214 .destroy = radeon_enc_destroy, 227 .destroy = radeon_enc_destroy,
215}; 228};
216 229
217static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
218 struct drm_display_mode *mode,
219 struct drm_display_mode *adjusted_mode)
220{
221 /* set the active encoder to connector routing */
222 radeon_encoder_set_active_device(encoder);
223 drm_mode_set_crtcinfo(adjusted_mode, 0);
224
225 return true;
226}
227
228static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode) 230static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
229{ 231{
230 struct drm_device *dev = encoder->dev; 232 struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
410 412
411static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = { 413static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
412 .dpms = radeon_legacy_primary_dac_dpms, 414 .dpms = radeon_legacy_primary_dac_dpms,
413 .mode_fixup = radeon_legacy_primary_dac_mode_fixup, 415 .mode_fixup = radeon_legacy_mode_fixup,
414 .prepare = radeon_legacy_primary_dac_prepare, 416 .prepare = radeon_legacy_primary_dac_prepare,
415 .mode_set = radeon_legacy_primary_dac_mode_set, 417 .mode_set = radeon_legacy_primary_dac_mode_set,
416 .commit = radeon_legacy_primary_dac_commit, 418 .commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
423 .destroy = radeon_enc_destroy, 425 .destroy = radeon_enc_destroy,
424}; 426};
425 427
426static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
427 struct drm_display_mode *mode,
428 struct drm_display_mode *adjusted_mode)
429{
430
431 drm_mode_set_crtcinfo(adjusted_mode, 0);
432
433 return true;
434}
435
436static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode) 428static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
437{ 429{
438 struct drm_device *dev = encoder->dev; 430 struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
584 576
585static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = { 577static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
586 .dpms = radeon_legacy_tmds_int_dpms, 578 .dpms = radeon_legacy_tmds_int_dpms,
587 .mode_fixup = radeon_legacy_tmds_int_mode_fixup, 579 .mode_fixup = radeon_legacy_mode_fixup,
588 .prepare = radeon_legacy_tmds_int_prepare, 580 .prepare = radeon_legacy_tmds_int_prepare,
589 .mode_set = radeon_legacy_tmds_int_mode_set, 581 .mode_set = radeon_legacy_tmds_int_mode_set,
590 .commit = radeon_legacy_tmds_int_commit, 582 .commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
596 .destroy = radeon_enc_destroy, 588 .destroy = radeon_enc_destroy,
597}; 589};
598 590
599static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
600 struct drm_display_mode *mode,
601 struct drm_display_mode *adjusted_mode)
602{
603 /* set the active encoder to connector routing */
604 radeon_encoder_set_active_device(encoder);
605 drm_mode_set_crtcinfo(adjusted_mode, 0);
606
607 return true;
608}
609
610static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode) 591static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
611{ 592{
612 struct drm_device *dev = encoder->dev; 593 struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
697 /*if (mode->clock > 165000) 678 /*if (mode->clock > 165000)
698 fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/ 679 fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
699 } 680 }
681 if (!radeon_combios_external_tmds_setup(encoder))
682 radeon_external_tmds_setup(encoder);
700 } 683 }
701 684
702 if (radeon_crtc->crtc_id == 0) { 685 if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
724 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); 707 radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
725} 708}
726 709
710static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
711{
712 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
713 struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
714 if (tmds) {
715 if (tmds->i2c_bus)
716 radeon_i2c_destroy(tmds->i2c_bus);
717 }
718 kfree(radeon_encoder->enc_priv);
719 drm_encoder_cleanup(encoder);
720 kfree(radeon_encoder);
721}
722
727static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = { 723static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
728 .dpms = radeon_legacy_tmds_ext_dpms, 724 .dpms = radeon_legacy_tmds_ext_dpms,
729 .mode_fixup = radeon_legacy_tmds_ext_mode_fixup, 725 .mode_fixup = radeon_legacy_mode_fixup,
730 .prepare = radeon_legacy_tmds_ext_prepare, 726 .prepare = radeon_legacy_tmds_ext_prepare,
731 .mode_set = radeon_legacy_tmds_ext_mode_set, 727 .mode_set = radeon_legacy_tmds_ext_mode_set,
732 .commit = radeon_legacy_tmds_ext_commit, 728 .commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
735 731
736 732
737static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = { 733static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
738 .destroy = radeon_enc_destroy, 734 .destroy = radeon_ext_tmds_enc_destroy,
739}; 735};
740 736
741static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
742 struct drm_display_mode *mode,
743 struct drm_display_mode *adjusted_mode)
744{
745 /* set the active encoder to connector routing */
746 radeon_encoder_set_active_device(encoder);
747 drm_mode_set_crtcinfo(adjusted_mode, 0);
748
749 return true;
750}
751
752static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode) 737static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
753{ 738{
754 struct drm_device *dev = encoder->dev; 739 struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
1265 1250
1266static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = { 1251static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
1267 .dpms = radeon_legacy_tv_dac_dpms, 1252 .dpms = radeon_legacy_tv_dac_dpms,
1268 .mode_fixup = radeon_legacy_tv_dac_mode_fixup, 1253 .mode_fixup = radeon_legacy_mode_fixup,
1269 .prepare = radeon_legacy_tv_dac_prepare, 1254 .prepare = radeon_legacy_tv_dac_prepare,
1270 .mode_set = radeon_legacy_tv_dac_mode_set, 1255 .mode_set = radeon_legacy_tv_dac_mode_set,
1271 .commit = radeon_legacy_tv_dac_commit, 1256 .commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
1302 return tmds; 1287 return tmds;
1303} 1288}
1304 1289
1290static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
1291{
1292 struct drm_device *dev = encoder->base.dev;
1293 struct radeon_device *rdev = dev->dev_private;
1294 struct radeon_encoder_ext_tmds *tmds = NULL;
1295 bool ret;
1296
1297 if (rdev->is_atom_bios)
1298 return NULL;
1299
1300 tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
1301
1302 if (!tmds)
1303 return NULL;
1304
1305 ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
1306
1307 if (ret == false)
1308 radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
1309
1310 return tmds;
1311}
1312
1305void 1313void
1306radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) 1314radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
1307{ 1315{
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1329 encoder->possible_crtcs = 0x1; 1337 encoder->possible_crtcs = 0x1;
1330 else 1338 else
1331 encoder->possible_crtcs = 0x3; 1339 encoder->possible_crtcs = 0x3;
1332 encoder->possible_clones = 0;
1333 1340
1334 radeon_encoder->enc_priv = NULL; 1341 radeon_encoder->enc_priv = NULL;
1335 1342
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
1373 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS); 1380 drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
1374 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs); 1381 drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
1375 if (!rdev->is_atom_bios) 1382 if (!rdev->is_atom_bios)
1376 radeon_combios_get_ext_tmds_info(radeon_encoder); 1383 radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
1377 break; 1384 break;
1378 } 1385 }
1379} 1386}
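The radeon_legacy_encoders.c hunks above fold five near-identical per-encoder mode_fixup callbacks (LVDS, primary DAC, internal and external TMDS, TV DAC) into one shared radeon_legacy_mode_fixup() wired into every helper-funcs table. A toy illustration of sharing one callback across several ops tables follows; the demo_* types are hypothetical miniatures, not the DRM helper structs.

#include <stdbool.h>
#include <stdio.h>

struct demo_mode { int hdisplay, vdisplay; };

typedef bool (*fixup_fn)(const struct demo_mode *mode, struct demo_mode *adjusted);

/* miniature of the drm_encoder_helper_funcs vtable pattern */
struct demo_helper_funcs {
	fixup_fn mode_fixup;
	/* dpms, prepare, mode_set, commit would live here too */
};

/* one shared fixup replaces several copy-pasted per-encoder versions */
static bool demo_shared_mode_fixup(const struct demo_mode *mode,
				   struct demo_mode *adjusted)
{
	*adjusted = *mode;	/* common work every encoder needed anyway */
	return true;
}

static const struct demo_helper_funcs lvds_funcs = { .mode_fixup = demo_shared_mode_fixup };
static const struct demo_helper_funcs dac_funcs  = { .mode_fixup = demo_shared_mode_fixup };

int main(void)
{
	struct demo_mode mode = { 1024, 768 }, adjusted;

	lvds_funcs.mode_fixup(&mode, &adjusted);	/* both tables dispatch */
	dac_funcs.mode_fixup(&mode, &adjusted);		/* to the same function */
	printf("%dx%d\n", adjusted.hdisplay, adjusted.vdisplay);
	return 0;
}

Any encoder that genuinely needs extra fixup work (as LVDS does with its native-mode copy) can still branch on its active device inside the shared callback, which is exactly what the new radeon_legacy_mode_fixup() does.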
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ace726aa0d76..44d4b652ea12 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -33,6 +33,7 @@
33#include <drm_crtc.h> 33#include <drm_crtc.h>
34#include <drm_mode.h> 34#include <drm_mode.h>
35#include <drm_edid.h> 35#include <drm_edid.h>
36#include <drm_dp_helper.h>
36#include <linux/i2c.h> 37#include <linux/i2c.h>
37#include <linux/i2c-id.h> 38#include <linux/i2c-id.h>
38#include <linux/i2c-algo-bit.h> 39#include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@ enum radeon_tv_std {
89 TV_STD_PAL_CN, 90 TV_STD_PAL_CN,
90}; 91};
91 92
93/* radeon gpio-based i2c
94 * 1. "mask" reg and bits
95 * grabs the gpio pins for software use
96 * 0=not held 1=held
97 * 2. "a" reg and bits
98 * output pin value
99 * 0=low 1=high
100 * 3. "en" reg and bits
101 * sets the pin direction
102 * 0=input 1=output
103 * 4. "y" reg and bits
104 * input pin value
105 * 0=low 1=high
106 */
92struct radeon_i2c_bus_rec { 107struct radeon_i2c_bus_rec {
93 bool valid; 108 bool valid;
109 /* id used by atom */
110 uint8_t i2c_id;
111 /* can be used with hw i2c engine */
112 bool hw_capable;
113 /* uses multi-media i2c engine */
114 bool mm_i2c;
115 /* regs and bits */
94 uint32_t mask_clk_reg; 116 uint32_t mask_clk_reg;
95 uint32_t mask_data_reg; 117 uint32_t mask_data_reg;
96 uint32_t a_clk_reg; 118 uint32_t a_clk_reg;
97 uint32_t a_data_reg; 119 uint32_t a_data_reg;
98 uint32_t put_clk_reg; 120 uint32_t en_clk_reg;
99 uint32_t put_data_reg; 121 uint32_t en_data_reg;
100 uint32_t get_clk_reg; 122 uint32_t y_clk_reg;
101 uint32_t get_data_reg; 123 uint32_t y_data_reg;
102 uint32_t mask_clk_mask; 124 uint32_t mask_clk_mask;
103 uint32_t mask_data_mask; 125 uint32_t mask_data_mask;
104 uint32_t put_clk_mask;
105 uint32_t put_data_mask;
106 uint32_t get_clk_mask;
107 uint32_t get_data_mask;
108 uint32_t a_clk_mask; 126 uint32_t a_clk_mask;
109 uint32_t a_data_mask; 127 uint32_t a_data_mask;
128 uint32_t en_clk_mask;
129 uint32_t en_data_mask;
130 uint32_t y_clk_mask;
131 uint32_t y_data_mask;
110}; 132};
111 133
112struct radeon_tmds_pll { 134struct radeon_tmds_pll {
@@ -150,9 +172,12 @@ struct radeon_pll {
150}; 172};
151 173
152struct radeon_i2c_chan { 174struct radeon_i2c_chan {
153 struct drm_device *dev;
154 struct i2c_adapter adapter; 175 struct i2c_adapter adapter;
155 struct i2c_algo_bit_data algo; 176 struct drm_device *dev;
177 union {
178 struct i2c_algo_dp_aux_data dp;
179 struct i2c_algo_bit_data bit;
180 } algo;
156 struct radeon_i2c_bus_rec rec; 181 struct radeon_i2c_bus_rec rec;
157}; 182};
158 183
@@ -170,6 +195,11 @@ enum radeon_connector_table {
170 CT_EMAC, 195 CT_EMAC,
171}; 196};
172 197
198enum radeon_dvo_chip {
199 DVO_SIL164,
200 DVO_SIL1178,
201};
202
173struct radeon_mode_info { 203struct radeon_mode_info {
174 struct atom_context *atom_context; 204 struct atom_context *atom_context;
175 struct card_info *atom_card_info; 205 struct card_info *atom_card_info;
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds {
261 struct radeon_tmds_pll tmds_pll[4]; 291 struct radeon_tmds_pll tmds_pll[4];
262}; 292};
263 293
294struct radeon_encoder_ext_tmds {
295 /* tmds over dvo */
296 struct radeon_i2c_chan *i2c_bus;
297 uint8_t slave_addr;
298 enum radeon_dvo_chip dvo_chip;
299};
300
264/* spread spectrum */ 301/* spread spectrum */
265struct radeon_atom_ss { 302struct radeon_atom_ss {
266 uint16_t percentage; 303 uint16_t percentage;
@@ -302,6 +339,35 @@ struct radeon_encoder {
302struct radeon_connector_atom_dig { 339struct radeon_connector_atom_dig {
303 uint32_t igp_lane_info; 340 uint32_t igp_lane_info;
304 bool linkb; 341 bool linkb;
342 /* displayport */
343 struct radeon_i2c_chan *dp_i2c_bus;
344 u8 dpcd[8];
345 u8 dp_sink_type;
346 int dp_clock;
347 int dp_lane_count;
348};
349
350struct radeon_gpio_rec {
351 bool valid;
352 u8 id;
353 u32 reg;
354 u32 mask;
355};
356
357enum radeon_hpd_id {
358 RADEON_HPD_NONE = 0,
359 RADEON_HPD_1,
360 RADEON_HPD_2,
361 RADEON_HPD_3,
362 RADEON_HPD_4,
363 RADEON_HPD_5,
364 RADEON_HPD_6,
365};
366
367struct radeon_hpd {
368 enum radeon_hpd_id hpd;
369 u8 plugged_state;
370 struct radeon_gpio_rec gpio;
305}; 371};
306 372
307struct radeon_connector { 373struct radeon_connector {
@@ -318,6 +384,7 @@ struct radeon_connector {
318 void *con_priv; 384 void *con_priv;
319 bool dac_load_detect; 385 bool dac_load_detect;
320 uint16_t connector_object_id; 386 uint16_t connector_object_id;
387 struct radeon_hpd hpd;
321}; 388};
322 389
323struct radeon_framebuffer { 390struct radeon_framebuffer {
@@ -325,10 +392,37 @@ struct radeon_framebuffer {
325 struct drm_gem_object *obj; 392 struct drm_gem_object *obj;
326}; 393};
327 394
395extern void radeon_connector_hotplug(struct drm_connector *connector);
396extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
397extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
398 struct drm_display_mode *mode);
399extern void radeon_dp_set_link_config(struct drm_connector *connector,
400 struct drm_display_mode *mode);
401extern void dp_link_train(struct drm_encoder *encoder,
402 struct drm_connector *connector);
403extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
404extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
405extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
406 int action, uint8_t lane_num,
407 uint8_t lane_set);
408extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
409 uint8_t write_byte, uint8_t *read_byte);
410
411extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
412 struct radeon_i2c_bus_rec *rec,
413 const char *name);
328extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, 414extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
329 struct radeon_i2c_bus_rec *rec, 415 struct radeon_i2c_bus_rec *rec,
330 const char *name); 416 const char *name);
331extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c); 417extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
418extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
419 u8 slave_addr,
420 u8 addr,
421 u8 *val);
422extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
423 u8 slave_addr,
424 u8 addr,
425 u8 val);
332extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector); 426extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
333extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector); 427extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
334 428
@@ -343,12 +437,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
343 uint32_t *post_div_p, 437 uint32_t *post_div_p,
344 int flags); 438 int flags);
345 439
440extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
441 uint64_t freq,
442 uint32_t *dot_clock_p,
443 uint32_t *fb_div_p,
444 uint32_t *frac_fb_div_p,
445 uint32_t *ref_div_p,
446 uint32_t *post_div_p,
447 int flags);
448
449extern void radeon_setup_encoder_clones(struct drm_device *dev);
450
346struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index); 451struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
347struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv); 452struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
348struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv); 453struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
349struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index); 454struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
350struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); 455struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
351extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); 456extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
457extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
352extern int atombios_get_encoder_mode(struct drm_encoder *encoder); 458extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
353extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); 459extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
354 460
@@ -378,12 +484,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
378extern bool radeon_combios_get_clock_info(struct drm_device *dev); 484extern bool radeon_combios_get_clock_info(struct drm_device *dev);
379extern struct radeon_encoder_atom_dig * 485extern struct radeon_encoder_atom_dig *
380radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); 486radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
381bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, 487extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
382 struct radeon_encoder_int_tmds *tmds); 488 struct radeon_encoder_int_tmds *tmds);
383bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, 489extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
384 struct radeon_encoder_int_tmds *tmds); 490 struct radeon_encoder_int_tmds *tmds);
385bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, 491extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
386 struct radeon_encoder_int_tmds *tmds); 492 struct radeon_encoder_int_tmds *tmds);
493extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
494 struct radeon_encoder_ext_tmds *tmds);
495extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
496 struct radeon_encoder_ext_tmds *tmds);
387extern struct radeon_encoder_primary_dac * 497extern struct radeon_encoder_primary_dac *
388radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); 498radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
389extern struct radeon_encoder_tv_dac * 499extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@ extern struct radeon_encoder_tv_dac *
395radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); 505radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
396extern struct radeon_encoder_primary_dac * 506extern struct radeon_encoder_primary_dac *
397radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder); 507radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
508extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
509extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
398extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); 510extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
399extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); 511extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
400extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); 512extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
426 struct radeon_crtc *radeon_crtc); 538 struct radeon_crtc *radeon_crtc);
427void radeon_legacy_init_crtc(struct drm_device *dev, 539void radeon_legacy_init_crtc(struct drm_device *dev,
428 struct radeon_crtc *radeon_crtc); 540 struct radeon_crtc *radeon_crtc);
429void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state); 541extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
430 542
431void radeon_get_clock_info(struct drm_device *dev); 543void radeon_get_clock_info(struct drm_device *dev);
432 544
433extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev); 545extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
434extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev); 546extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
435 547
436void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
437 struct drm_display_mode *mode,
438 struct drm_display_mode *adjusted_mode);
439void radeon_enc_destroy(struct drm_encoder *encoder); 548void radeon_enc_destroy(struct drm_encoder *encoder);
440void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); 549void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
441void radeon_combios_asic_init(struct drm_device *dev); 550void radeon_combios_asic_init(struct drm_device *dev);
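The new comment block in radeon_mode.h spells out the four register roles behind radeon's GPIO-based i2c: "mask" grabs a pin for software use, "a" sets the output latch, "en" sets the pin direction, and "y" reads the pin back. For an open-drain bus like i2c, that suggests a line is driven low by enabling the output with a = 0 and released high by flipping the pin back to an input. A toy sketch over a fake register file follows; the register indices and the WREG32/RREG32 macros here are illustrative stand-ins, not the driver's.

#include <stdint.h>
#include <stdio.h>

/* fake MMIO space standing in for the radeon register file */
static uint32_t regs[16];
#define WREG32(r, v) (regs[(r)] = (v))
#define RREG32(r)    (regs[(r)])

/* illustrative register indices matching the roles documented above:
 * mask = grab pin for software use, a = output latch,
 * en = direction (1 = output), y = input value */
enum { MASK_CLK = 0, A_CLK = 1, EN_CLK = 2, Y_CLK = 3 };
#define CLK_BIT (1u << 0)

/* open-drain SCL: drive low by enabling the output with a = 0,
 * release high by turning the pin back into an input */
static void demo_set_scl(int high)
{
	WREG32(A_CLK, 0);			/* output latch held at 0 */
	WREG32(EN_CLK, high ? 0 : CLK_BIT);	/* input = released/pulled high */
}

static int demo_get_scl(void)
{
	return (RREG32(Y_CLK) & CLK_BIT) != 0;	/* sample the pin */
}

int main(void)
{
	WREG32(MASK_CLK, CLK_BIT);	/* grab the pin for software use */
	demo_set_scl(0);
	demo_set_scl(1);
	printf("scl=%d\n", demo_get_scl());
	return 0;
}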
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1f056dadc5c2..544e18ffaf22 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -34,100 +34,53 @@
34#include "radeon_drm.h" 34#include "radeon_drm.h"
35#include "radeon.h" 35#include "radeon.h"
36 36
37struct radeon_object {
38 struct ttm_buffer_object tobj;
39 struct list_head list;
40 struct radeon_device *rdev;
41 struct drm_gem_object *gobj;
42 struct ttm_bo_kmap_obj kmap;
43 unsigned pin_count;
44 uint64_t gpu_addr;
45 void *kptr;
46 bool is_iomem;
47 uint32_t tiling_flags;
48 uint32_t pitch;
49 int surface_reg;
50};
51 37
52int radeon_ttm_init(struct radeon_device *rdev); 38int radeon_ttm_init(struct radeon_device *rdev);
53void radeon_ttm_fini(struct radeon_device *rdev); 39void radeon_ttm_fini(struct radeon_device *rdev);
40static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
54 41
55/* 42/*
56 * To exclude mutual BO access we rely on bo_reserve exclusion, as all 43 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 57 * functions are calling it. 44 * functions are calling it.
58 */ 45 */
59 46
60static int radeon_object_reserve(struct radeon_object *robj, bool interruptible) 47static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
61{ 48{
62 return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0); 49 struct radeon_bo *bo;
63}
64 50
65static void radeon_object_unreserve(struct radeon_object *robj) 51 bo = container_of(tbo, struct radeon_bo, tbo);
66{ 52 mutex_lock(&bo->rdev->gem.mutex);
67 ttm_bo_unreserve(&robj->tobj); 53 list_del_init(&bo->list);
54 mutex_unlock(&bo->rdev->gem.mutex);
55 radeon_bo_clear_surface_reg(bo);
56 kfree(bo);
68} 57}
69 58
70static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj) 59void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
71{ 60{
72 struct radeon_object *robj; 61 u32 c = 0;
73 62
74 robj = container_of(tobj, struct radeon_object, tobj); 63 rbo->placement.fpfn = 0;
75 list_del_init(&robj->list); 64 rbo->placement.lpfn = 0;
76 radeon_object_clear_surface_reg(robj); 65 rbo->placement.placement = rbo->placements;
77 kfree(robj); 66 rbo->placement.busy_placement = rbo->placements;
67 if (domain & RADEON_GEM_DOMAIN_VRAM)
68 rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
69 TTM_PL_FLAG_VRAM;
70 if (domain & RADEON_GEM_DOMAIN_GTT)
71 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
72 if (domain & RADEON_GEM_DOMAIN_CPU)
73 rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
74 rbo->placement.num_placement = c;
75 rbo->placement.num_busy_placement = c;
78} 76}
79 77
80static inline void radeon_object_gpu_addr(struct radeon_object *robj) 78int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
79 unsigned long size, bool kernel, u32 domain,
80 struct radeon_bo **bo_ptr)
81{ 81{
82 /* Default gpu address */ 82 struct radeon_bo *bo;
83 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
84 if (robj->tobj.mem.mm_node == NULL) {
85 return;
86 }
87 robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
88 switch (robj->tobj.mem.mem_type) {
89 case TTM_PL_VRAM:
90 robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
91 break;
92 case TTM_PL_TT:
93 robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
94 break;
95 default:
96 DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
97 robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
98 return;
99 }
100}
101
102static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
103{
104 uint32_t flags = 0;
105 if (domain & RADEON_GEM_DOMAIN_VRAM) {
106 flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
107 }
108 if (domain & RADEON_GEM_DOMAIN_GTT) {
109 flags |= TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
110 }
111 if (domain & RADEON_GEM_DOMAIN_CPU) {
112 flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
113 }
114 if (!flags) {
115 flags |= TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;
116 }
117 return flags;
118}
119
120int radeon_object_create(struct radeon_device *rdev,
121 struct drm_gem_object *gobj,
122 unsigned long size,
123 bool kernel,
124 uint32_t domain,
125 bool interruptible,
126 struct radeon_object **robj_ptr)
127{
128 struct radeon_object *robj;
129 enum ttm_bo_type type; 83 enum ttm_bo_type type;
130 uint32_t flags;
131 int r; 84 int r;
132 85
133 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) { 86 if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +91,125 @@ int radeon_object_create(struct radeon_device *rdev,
138 } else { 91 } else {
139 type = ttm_bo_type_device; 92 type = ttm_bo_type_device;
140 } 93 }
141 *robj_ptr = NULL; 94 *bo_ptr = NULL;
142 robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL); 95 bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
143 if (robj == NULL) { 96 if (bo == NULL)
144 return -ENOMEM; 97 return -ENOMEM;
145 } 98 bo->rdev = rdev;
146 robj->rdev = rdev; 99 bo->gobj = gobj;
147 robj->gobj = gobj; 100 bo->surface_reg = -1;
148 robj->surface_reg = -1; 101 INIT_LIST_HEAD(&bo->list);
149 INIT_LIST_HEAD(&robj->list); 102
150 103 radeon_ttm_placement_from_domain(bo, domain);
 151 flags = radeon_object_flags_from_domain(domain); 104 /* Kernel allocations are uninterruptible */
152 r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags, 105 r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
153 0, 0, false, NULL, size, 106 &bo->placement, 0, 0, !kernel, NULL, size,
154 &radeon_ttm_object_object_destroy); 107 &radeon_ttm_bo_destroy);
155 if (unlikely(r != 0)) { 108 if (unlikely(r != 0)) {
156 /* ttm call radeon_ttm_object_object_destroy if error happen */ 109 if (r != -ERESTARTSYS)
157 DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n", 110 dev_err(rdev->dev,
158 size, flags, 0); 111 "object_init failed for (%lu, 0x%08X)\n",
112 size, domain);
159 return r; 113 return r;
160 } 114 }
161 *robj_ptr = robj; 115 *bo_ptr = bo;
162 if (gobj) { 116 if (gobj) {
163 list_add_tail(&robj->list, &rdev->gem.objects); 117 mutex_lock(&bo->rdev->gem.mutex);
118 list_add_tail(&bo->list, &rdev->gem.objects);
119 mutex_unlock(&bo->rdev->gem.mutex);
164 } 120 }
165 return 0; 121 return 0;
166} 122}
167 123
168int radeon_object_kmap(struct radeon_object *robj, void **ptr) 124int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
169{ 125{
126 bool is_iomem;
170 int r; 127 int r;
171 128
172 spin_lock(&robj->tobj.lock); 129 if (bo->kptr) {
173 if (robj->kptr) {
174 if (ptr) { 130 if (ptr) {
175 *ptr = robj->kptr; 131 *ptr = bo->kptr;
176 } 132 }
177 spin_unlock(&robj->tobj.lock);
178 return 0; 133 return 0;
179 } 134 }
180 spin_unlock(&robj->tobj.lock); 135 r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
181 r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
182 if (r) { 136 if (r) {
183 return r; 137 return r;
184 } 138 }
185 spin_lock(&robj->tobj.lock); 139 bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
186 robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
187 spin_unlock(&robj->tobj.lock);
188 if (ptr) { 140 if (ptr) {
189 *ptr = robj->kptr; 141 *ptr = bo->kptr;
190 } 142 }
191 radeon_object_check_tiling(robj, 0, 0); 143 radeon_bo_check_tiling(bo, 0, 0);
192 return 0; 144 return 0;
193} 145}
194 146
195void radeon_object_kunmap(struct radeon_object *robj) 147void radeon_bo_kunmap(struct radeon_bo *bo)
196{ 148{
197 spin_lock(&robj->tobj.lock); 149 if (bo->kptr == NULL)
198 if (robj->kptr == NULL) {
199 spin_unlock(&robj->tobj.lock);
200 return; 150 return;
201 } 151 bo->kptr = NULL;
202 robj->kptr = NULL; 152 radeon_bo_check_tiling(bo, 0, 0);
203 spin_unlock(&robj->tobj.lock); 153 ttm_bo_kunmap(&bo->kmap);
204 radeon_object_check_tiling(robj, 0, 0);
205 ttm_bo_kunmap(&robj->kmap);
206} 154}
207 155
208void radeon_object_unref(struct radeon_object **robj) 156void radeon_bo_unref(struct radeon_bo **bo)
209{ 157{
210 struct ttm_buffer_object *tobj; 158 struct ttm_buffer_object *tbo;
211 159
212 if ((*robj) == NULL) { 160 if ((*bo) == NULL)
213 return; 161 return;
214 } 162 tbo = &((*bo)->tbo);
215 tobj = &((*robj)->tobj); 163 ttm_bo_unref(&tbo);
216 ttm_bo_unref(&tobj); 164 if (tbo == NULL)
217 if (tobj == NULL) { 165 *bo = NULL;
218 *robj = NULL;
219 }
220}
221
222int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
223{
224 *offset = robj->tobj.addr_space_offset;
225 return 0;
226} 166}
227 167
228int radeon_object_pin(struct radeon_object *robj, uint32_t domain, 168int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
229 uint64_t *gpu_addr)
230{ 169{
231 uint32_t flags; 170 int r, i;
232 uint32_t tmp;
233 int r;
234 171
235 flags = radeon_object_flags_from_domain(domain); 172 radeon_ttm_placement_from_domain(bo, domain);
236 spin_lock(&robj->tobj.lock); 173 if (bo->pin_count) {
237 if (robj->pin_count) { 174 bo->pin_count++;
238 robj->pin_count++; 175 if (gpu_addr)
239 if (gpu_addr != NULL) { 176 *gpu_addr = radeon_bo_gpu_offset(bo);
240 *gpu_addr = robj->gpu_addr;
241 }
242 spin_unlock(&robj->tobj.lock);
243 return 0; 177 return 0;
244 } 178 }
245 spin_unlock(&robj->tobj.lock); 179 radeon_ttm_placement_from_domain(bo, domain);
246 r = radeon_object_reserve(robj, false); 180 for (i = 0; i < bo->placement.num_placement; i++)
247 if (unlikely(r != 0)) { 181 bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
248 DRM_ERROR("radeon: failed to reserve object for pinning it.\n"); 182 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
249 return r; 183 if (likely(r == 0)) {
250 } 184 bo->pin_count = 1;
251 tmp = robj->tobj.mem.placement; 185 if (gpu_addr != NULL)
252 ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM); 186 *gpu_addr = radeon_bo_gpu_offset(bo);
253 robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING; 187 }
254 r = ttm_buffer_object_validate(&robj->tobj, 188 if (unlikely(r != 0))
255 robj->tobj.proposed_placement, 189 dev_err(bo->rdev->dev, "%p pin failed\n", bo);
256 false, false);
257 radeon_object_gpu_addr(robj);
258 if (gpu_addr != NULL) {
259 *gpu_addr = robj->gpu_addr;
260 }
261 robj->pin_count = 1;
262 if (unlikely(r != 0)) {
263 DRM_ERROR("radeon: failed to pin object.\n");
264 }
265 radeon_object_unreserve(robj);
266 return r; 190 return r;
267} 191}
268 192
269void radeon_object_unpin(struct radeon_object *robj) 193int radeon_bo_unpin(struct radeon_bo *bo)
270{ 194{
271 uint32_t flags; 195 int r, i;
272 int r;
273 196
274 spin_lock(&robj->tobj.lock); 197 if (!bo->pin_count) {
275 if (!robj->pin_count) { 198 dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
276 spin_unlock(&robj->tobj.lock); 199 return 0;
277 printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
278 return;
279 }
280 robj->pin_count--;
281 if (robj->pin_count) {
282 spin_unlock(&robj->tobj.lock);
283 return;
284 }
285 spin_unlock(&robj->tobj.lock);
286 r = radeon_object_reserve(robj, false);
287 if (unlikely(r != 0)) {
288 DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
289 return;
290 }
291 flags = robj->tobj.mem.placement;
292 robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
293 r = ttm_buffer_object_validate(&robj->tobj,
294 robj->tobj.proposed_placement,
295 false, false);
296 if (unlikely(r != 0)) {
297 DRM_ERROR("radeon: failed to unpin buffer.\n");
298 }
299 radeon_object_unreserve(robj);
300}
301
302int radeon_object_wait(struct radeon_object *robj)
303{
304 int r = 0;
305
306 /* FIXME: should use block reservation instead */
307 r = radeon_object_reserve(robj, true);
308 if (unlikely(r != 0)) {
309 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
310 return r;
311 }
312 spin_lock(&robj->tobj.lock);
313 if (robj->tobj.sync_obj) {
314 r = ttm_bo_wait(&robj->tobj, true, true, false);
315 }
316 spin_unlock(&robj->tobj.lock);
317 radeon_object_unreserve(robj);
318 return r;
319}
320
321int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
322{
323 int r = 0;
324
325 r = radeon_object_reserve(robj, true);
326 if (unlikely(r != 0)) {
327 DRM_ERROR("radeon: failed to reserve object for waiting.\n");
328 return r;
329 }
330 spin_lock(&robj->tobj.lock);
331 *cur_placement = robj->tobj.mem.mem_type;
332 if (robj->tobj.sync_obj) {
333 r = ttm_bo_wait(&robj->tobj, true, true, true);
334 } 200 }
335 spin_unlock(&robj->tobj.lock); 201 bo->pin_count--;
336 radeon_object_unreserve(robj); 202 if (bo->pin_count)
203 return 0;
204 for (i = 0; i < bo->placement.num_placement; i++)
205 bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
206 r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
207 if (unlikely(r != 0))
208 dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
337 return r; 209 return r;
338} 210}
339 211
340int radeon_object_evict_vram(struct radeon_device *rdev) 212int radeon_bo_evict_vram(struct radeon_device *rdev)
341{ 213{
342 if (rdev->flags & RADEON_IS_IGP) { 214 if (rdev->flags & RADEON_IS_IGP) {
343 /* Useless to evict on IGP chips */ 215 /* Useless to evict on IGP chips */
@@ -346,30 +218,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
346 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); 218 return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
347} 219}
348 220
349void radeon_object_force_delete(struct radeon_device *rdev) 221void radeon_bo_force_delete(struct radeon_device *rdev)
350{ 222{
351 struct radeon_object *robj, *n; 223 struct radeon_bo *bo, *n;
352 struct drm_gem_object *gobj; 224 struct drm_gem_object *gobj;
353 225
354 if (list_empty(&rdev->gem.objects)) { 226 if (list_empty(&rdev->gem.objects)) {
355 return; 227 return;
356 } 228 }
 357 DRM_ERROR("Userspace still has active objects !\n"); 229 dev_err(rdev->dev, "Userspace still has active objects!\n");
358 list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) { 230 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
359 mutex_lock(&rdev->ddev->struct_mutex); 231 mutex_lock(&rdev->ddev->struct_mutex);
360 gobj = robj->gobj; 232 gobj = bo->gobj;
361 DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n", 233 dev_err(rdev->dev, "%p %p %lu %lu force free\n",
362 gobj, robj, (unsigned long)gobj->size, 234 gobj, bo, (unsigned long)gobj->size,
363 *((unsigned long *)&gobj->refcount)); 235 *((unsigned long *)&gobj->refcount));
364 list_del_init(&robj->list); 236 mutex_lock(&bo->rdev->gem.mutex);
365 radeon_object_unref(&robj); 237 list_del_init(&bo->list);
238 mutex_unlock(&bo->rdev->gem.mutex);
239 radeon_bo_unref(&bo);
366 gobj->driver_private = NULL; 240 gobj->driver_private = NULL;
367 drm_gem_object_unreference(gobj); 241 drm_gem_object_unreference(gobj);
368 mutex_unlock(&rdev->ddev->struct_mutex); 242 mutex_unlock(&rdev->ddev->struct_mutex);
369 } 243 }
370} 244}
371 245
372int radeon_object_init(struct radeon_device *rdev) 246int radeon_bo_init(struct radeon_device *rdev)
373{ 247{
374 /* Add an MTRR for the VRAM */ 248 /* Add an MTRR for the VRAM */
375 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, 249 rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +256,13 @@ int radeon_object_init(struct radeon_device *rdev)
382 return radeon_ttm_init(rdev); 256 return radeon_ttm_init(rdev);
383} 257}
384 258
385void radeon_object_fini(struct radeon_device *rdev) 259void radeon_bo_fini(struct radeon_device *rdev)
386{ 260{
387 radeon_ttm_fini(rdev); 261 radeon_ttm_fini(rdev);
388} 262}
389 263
390void radeon_object_list_add_object(struct radeon_object_list *lobj, 264void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
391 struct list_head *head) 265 struct list_head *head)
392{ 266{
393 if (lobj->wdomain) { 267 if (lobj->wdomain) {
394 list_add(&lobj->list, head); 268 list_add(&lobj->list, head);
@@ -397,72 +271,62 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
397 } 271 }
398} 272}
399 273
400int radeon_object_list_reserve(struct list_head *head) 274int radeon_bo_list_reserve(struct list_head *head)
401{ 275{
402 struct radeon_object_list *lobj; 276 struct radeon_bo_list *lobj;
403 int r; 277 int r;
404 278
405 list_for_each_entry(lobj, head, list){ 279 list_for_each_entry(lobj, head, list){
406 if (!lobj->robj->pin_count) { 280 r = radeon_bo_reserve(lobj->bo, false);
407 r = radeon_object_reserve(lobj->robj, true); 281 if (unlikely(r != 0))
408 if (unlikely(r != 0)) { 282 return r;
409 DRM_ERROR("radeon: failed to reserve object.\n");
410 return r;
411 }
412 } else {
413 }
414 } 283 }
415 return 0; 284 return 0;
416} 285}
417 286
418void radeon_object_list_unreserve(struct list_head *head) 287void radeon_bo_list_unreserve(struct list_head *head)
419{ 288{
420 struct radeon_object_list *lobj; 289 struct radeon_bo_list *lobj;
421 290
422 list_for_each_entry(lobj, head, list) { 291 list_for_each_entry(lobj, head, list) {
423 if (!lobj->robj->pin_count) { 292 /* only unreserve object we successfully reserved */
424 radeon_object_unreserve(lobj->robj); 293 if (radeon_bo_is_reserved(lobj->bo))
425 } 294 radeon_bo_unreserve(lobj->bo);
426 } 295 }
427} 296}
428 297
429int radeon_object_list_validate(struct list_head *head, void *fence) 298int radeon_bo_list_validate(struct list_head *head, void *fence)
430{ 299{
431 struct radeon_object_list *lobj; 300 struct radeon_bo_list *lobj;
432 struct radeon_object *robj; 301 struct radeon_bo *bo;
433 struct radeon_fence *old_fence = NULL; 302 struct radeon_fence *old_fence = NULL;
434 int r; 303 int r;
435 304
436 r = radeon_object_list_reserve(head); 305 r = radeon_bo_list_reserve(head);
437 if (unlikely(r != 0)) { 306 if (unlikely(r != 0)) {
438 radeon_object_list_unreserve(head);
439 return r; 307 return r;
440 } 308 }
441 list_for_each_entry(lobj, head, list) { 309 list_for_each_entry(lobj, head, list) {
442 robj = lobj->robj; 310 bo = lobj->bo;
443 if (!robj->pin_count) { 311 if (!bo->pin_count) {
444 if (lobj->wdomain) { 312 if (lobj->wdomain) {
445 robj->tobj.proposed_placement = 313 radeon_ttm_placement_from_domain(bo,
446 radeon_object_flags_from_domain(lobj->wdomain); 314 lobj->wdomain);
447 } else { 315 } else {
448 robj->tobj.proposed_placement = 316 radeon_ttm_placement_from_domain(bo,
449 radeon_object_flags_from_domain(lobj->rdomain); 317 lobj->rdomain);
450 } 318 }
451 r = ttm_buffer_object_validate(&robj->tobj, 319 r = ttm_bo_validate(&bo->tbo, &bo->placement,
452 robj->tobj.proposed_placement, 320 true, false);
453 true, false); 321 if (unlikely(r))
454 if (unlikely(r)) {
455 DRM_ERROR("radeon: failed to validate.\n");
456 return r; 322 return r;
457 }
458 radeon_object_gpu_addr(robj);
459 } 323 }
460 lobj->gpu_offset = robj->gpu_addr; 324 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
461 lobj->tiling_flags = robj->tiling_flags; 325 lobj->tiling_flags = bo->tiling_flags;
462 if (fence) { 326 if (fence) {
463 old_fence = (struct radeon_fence *)robj->tobj.sync_obj; 327 old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
464 robj->tobj.sync_obj = radeon_fence_ref(fence); 328 bo->tbo.sync_obj = radeon_fence_ref(fence);
465 robj->tobj.sync_obj_arg = NULL; 329 bo->tbo.sync_obj_arg = NULL;
466 } 330 }
467 if (old_fence) { 331 if (old_fence) {
468 radeon_fence_unref(&old_fence); 332 radeon_fence_unref(&old_fence);
@@ -471,51 +335,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
471 return 0; 335 return 0;
472} 336}
473 337
474void radeon_object_list_unvalidate(struct list_head *head) 338void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
475{ 339{
476 struct radeon_object_list *lobj; 340 struct radeon_bo_list *lobj;
477 struct radeon_fence *old_fence = NULL; 341 struct radeon_fence *old_fence;
478 342
479 list_for_each_entry(lobj, head, list) { 343 if (fence)
480 old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; 344 list_for_each_entry(lobj, head, list) {
481 lobj->robj->tobj.sync_obj = NULL; 345 old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
482 if (old_fence) { 346 if (old_fence == fence) {
483 radeon_fence_unref(&old_fence); 347 lobj->bo->tbo.sync_obj = NULL;
348 radeon_fence_unref(&old_fence);
349 }
484 } 350 }
485 } 351 radeon_bo_list_unreserve(head);
486 radeon_object_list_unreserve(head);
487}
488
489void radeon_object_list_clean(struct list_head *head)
490{
491 radeon_object_list_unreserve(head);
492} 352}
493 353
494int radeon_object_fbdev_mmap(struct radeon_object *robj, 354int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
495 struct vm_area_struct *vma) 355 struct vm_area_struct *vma)
496{ 356{
497 return ttm_fbdev_mmap(vma, &robj->tobj); 357 return ttm_fbdev_mmap(vma, &bo->tbo);
498} 358}
499 359
500unsigned long radeon_object_size(struct radeon_object *robj) 360int radeon_bo_get_surface_reg(struct radeon_bo *bo)
501{ 361{
502 return robj->tobj.num_pages << PAGE_SHIFT; 362 struct radeon_device *rdev = bo->rdev;
503}
504
505int radeon_object_get_surface_reg(struct radeon_object *robj)
506{
507 struct radeon_device *rdev = robj->rdev;
508 struct radeon_surface_reg *reg; 363 struct radeon_surface_reg *reg;
509 struct radeon_object *old_object; 364 struct radeon_bo *old_object;
510 int steal; 365 int steal;
511 int i; 366 int i;
512 367
513 if (!robj->tiling_flags) 368 BUG_ON(!atomic_read(&bo->tbo.reserved));
369
370 if (!bo->tiling_flags)
514 return 0; 371 return 0;
515 372
516 if (robj->surface_reg >= 0) { 373 if (bo->surface_reg >= 0) {
517 reg = &rdev->surface_regs[robj->surface_reg]; 374 reg = &rdev->surface_regs[bo->surface_reg];
518 i = robj->surface_reg; 375 i = bo->surface_reg;
519 goto out; 376 goto out;
520 } 377 }
521 378
@@ -523,10 +380,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
523 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) { 380 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
524 381
525 reg = &rdev->surface_regs[i]; 382 reg = &rdev->surface_regs[i];
526 if (!reg->robj) 383 if (!reg->bo)
527 break; 384 break;
528 385
529 old_object = reg->robj; 386 old_object = reg->bo;
530 if (old_object->pin_count == 0) 387 if (old_object->pin_count == 0)
531 steal = i; 388 steal = i;
532 } 389 }
@@ -537,91 +394,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
537 return -ENOMEM; 394 return -ENOMEM;
538 /* find someone with a surface reg and nuke their BO */ 395 /* find someone with a surface reg and nuke their BO */
539 reg = &rdev->surface_regs[steal]; 396 reg = &rdev->surface_regs[steal];
540 old_object = reg->robj; 397 old_object = reg->bo;
541 /* blow away the mapping */ 398 /* blow away the mapping */
542 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object); 399 DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
543 ttm_bo_unmap_virtual(&old_object->tobj); 400 ttm_bo_unmap_virtual(&old_object->tbo);
544 old_object->surface_reg = -1; 401 old_object->surface_reg = -1;
545 i = steal; 402 i = steal;
546 } 403 }
547 404
548 robj->surface_reg = i; 405 bo->surface_reg = i;
549 reg->robj = robj; 406 reg->bo = bo;
550 407
551out: 408out:
552 radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch, 409 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
553 robj->tobj.mem.mm_node->start << PAGE_SHIFT, 410 bo->tbo.mem.mm_node->start << PAGE_SHIFT,
554 robj->tobj.num_pages << PAGE_SHIFT); 411 bo->tbo.num_pages << PAGE_SHIFT);
555 return 0; 412 return 0;
556} 413}
557 414
558void radeon_object_clear_surface_reg(struct radeon_object *robj) 415static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
559{ 416{
560 struct radeon_device *rdev = robj->rdev; 417 struct radeon_device *rdev = bo->rdev;
561 struct radeon_surface_reg *reg; 418 struct radeon_surface_reg *reg;
562 419
563 if (robj->surface_reg == -1) 420 if (bo->surface_reg == -1)
564 return; 421 return;
565 422
566 reg = &rdev->surface_regs[robj->surface_reg]; 423 reg = &rdev->surface_regs[bo->surface_reg];
567 radeon_clear_surface_reg(rdev, robj->surface_reg); 424 radeon_clear_surface_reg(rdev, bo->surface_reg);
568 425
569 reg->robj = NULL; 426 reg->bo = NULL;
570 robj->surface_reg = -1; 427 bo->surface_reg = -1;
571} 428}
572 429
573void radeon_object_set_tiling_flags(struct radeon_object *robj, 430int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
574 uint32_t tiling_flags, uint32_t pitch) 431 uint32_t tiling_flags, uint32_t pitch)
575{ 432{
576 robj->tiling_flags = tiling_flags; 433 int r;
577 robj->pitch = pitch; 434
435 r = radeon_bo_reserve(bo, false);
436 if (unlikely(r != 0))
437 return r;
438 bo->tiling_flags = tiling_flags;
439 bo->pitch = pitch;
440 radeon_bo_unreserve(bo);
441 return 0;
578} 442}
579 443
580void radeon_object_get_tiling_flags(struct radeon_object *robj, 444void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
581 uint32_t *tiling_flags, 445 uint32_t *tiling_flags,
582 uint32_t *pitch) 446 uint32_t *pitch)
583{ 447{
448 BUG_ON(!atomic_read(&bo->tbo.reserved));
584 if (tiling_flags) 449 if (tiling_flags)
585 *tiling_flags = robj->tiling_flags; 450 *tiling_flags = bo->tiling_flags;
586 if (pitch) 451 if (pitch)
587 *pitch = robj->pitch; 452 *pitch = bo->pitch;
588} 453}
589 454
590int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved, 455int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
591 bool force_drop) 456 bool force_drop)
592{ 457{
593 if (!(robj->tiling_flags & RADEON_TILING_SURFACE)) 458 BUG_ON(!atomic_read(&bo->tbo.reserved));
459
460 if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
594 return 0; 461 return 0;
595 462
596 if (force_drop) { 463 if (force_drop) {
597 radeon_object_clear_surface_reg(robj); 464 radeon_bo_clear_surface_reg(bo);
598 return 0; 465 return 0;
599 } 466 }
600 467
601 if (robj->tobj.mem.mem_type != TTM_PL_VRAM) { 468 if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
602 if (!has_moved) 469 if (!has_moved)
603 return 0; 470 return 0;
604 471
605 if (robj->surface_reg >= 0) 472 if (bo->surface_reg >= 0)
606 radeon_object_clear_surface_reg(robj); 473 radeon_bo_clear_surface_reg(bo);
607 return 0; 474 return 0;
608 } 475 }
609 476
610 if ((robj->surface_reg >= 0) && !has_moved) 477 if ((bo->surface_reg >= 0) && !has_moved)
611 return 0; 478 return 0;
612 479
613 return radeon_object_get_surface_reg(robj); 480 return radeon_bo_get_surface_reg(bo);
614} 481}
615 482
616void radeon_bo_move_notify(struct ttm_buffer_object *bo, 483void radeon_bo_move_notify(struct ttm_buffer_object *bo,
617 struct ttm_mem_reg *mem) 484 struct ttm_mem_reg *mem)
618{ 485{
619 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); 486 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
620 radeon_object_check_tiling(robj, 0, 1); 487 radeon_bo_check_tiling(rbo, 0, 1);
621} 488}
622 489
623void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 490void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
624{ 491{
625 struct radeon_object *robj = container_of(bo, struct radeon_object, tobj); 492 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
626 radeon_object_check_tiling(robj, 0, 0); 493 radeon_bo_check_tiling(rbo, 0, 0);
627} 494}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 10e8af6bb456..f6b69c2c0d00 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -28,19 +28,152 @@
28#ifndef __RADEON_OBJECT_H__ 28#ifndef __RADEON_OBJECT_H__
29#define __RADEON_OBJECT_H__ 29#define __RADEON_OBJECT_H__
30 30
31#include <ttm/ttm_bo_api.h> 31#include <drm/radeon_drm.h>
32#include <ttm/ttm_bo_driver.h> 32#include "radeon.h"
33#include <ttm/ttm_placement.h>
34#include <ttm/ttm_module.h>
35 33
36/* 34/**
37 * TTM. 35 * radeon_mem_type_to_domain - return domain corresponding to mem_type
36 * @mem_type: ttm memory type
37 *
 38 * Returns the domain corresponding to the given TTM mem_type.
39 */
40static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
41{
42 switch (mem_type) {
43 case TTM_PL_VRAM:
44 return RADEON_GEM_DOMAIN_VRAM;
45 case TTM_PL_TT:
46 return RADEON_GEM_DOMAIN_GTT;
47 case TTM_PL_SYSTEM:
48 return RADEON_GEM_DOMAIN_CPU;
49 default:
50 break;
51 }
52 return 0;
53}
54
55/**
56 * radeon_bo_reserve - reserve bo
57 * @bo: bo structure
58 * @no_wait: don't sleep while trying to reserve (return -EBUSY)
59 *
60 * Returns:
61 * -EBUSY: buffer is busy and @no_wait is true
62 * -ERESTART: A wait for the buffer to become unreserved was interrupted by
63 * a signal. Release all buffer reservations and return to user-space.
38 */ 64 */
39struct radeon_mman { 65static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
40 struct ttm_bo_global_ref bo_global_ref; 66{
41 struct ttm_global_reference mem_global_ref; 67 int r;
42 bool mem_global_referenced; 68
43 struct ttm_bo_device bdev; 69retry:
44}; 70 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
71 if (unlikely(r != 0)) {
72 if (r == -ERESTART)
73 goto retry;
74 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
75 return r;
76 }
77 return 0;
78}
79
80static inline void radeon_bo_unreserve(struct radeon_bo *bo)
81{
82 ttm_bo_unreserve(&bo->tbo);
83}
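
The reserve/unreserve helpers above define the locking discipline the rest of this patch converts callers to: pin, kmap and tiling updates must now happen while the caller holds the TTM reservation, and the pin itself survives the unreserve. A minimal sketch of the resulting calling pattern, modelled on the ib-pool and ring-buffer hunks later in this patch (the function name and the GTT domain choice are illustrative only):

	static int example_pin_and_map(struct radeon_bo *bo, void **cpu_ptr)
	{
		u64 gpu_addr;
		int r;

		r = radeon_bo_reserve(bo, false);	/* sleeps until reserved */
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
		if (r == 0)
			r = radeon_bo_kmap(bo, cpu_ptr);
		radeon_bo_unreserve(bo);	/* the pin outlives the reservation */
		return r;
	}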
84
85/**
86 * radeon_bo_gpu_offset - return GPU offset of bo
87 * @bo: radeon object for which we query the offset
88 *
89 * Returns current GPU offset of the object.
90 *
91 * Note: object should either be pinned or reserved when calling this
 92 * function; it might be useful to add a check for this for debugging.
93 */
94static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
95{
96 return bo->tbo.offset;
97}
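
The note above already suggests adding a debugging check; one possible form, reusing the pin_count field and the reserved atomic that this patch checks elsewhere (a sketch only, not something the patch adds):

	static inline u64 example_bo_gpu_offset_checked(struct radeon_bo *bo)
	{
		/* warn if the object is neither pinned nor reserved */
		WARN_ON(!bo->pin_count && !atomic_read(&bo->tbo.reserved));
		return bo->tbo.offset;
	}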
98
99static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
100{
101 return bo->tbo.num_pages << PAGE_SHIFT;
102}
103
104static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
105{
106 return !!atomic_read(&bo->tbo.reserved);
107}
108
109/**
110 * radeon_bo_mmap_offset - return mmap offset of bo
111 * @bo: radeon object for which we query the offset
112 *
113 * Returns mmap offset of the object.
114 *
 115 * Note: addr_space_offset is constant after ttm bo init and thus isn't
 116 * protected by any lock.
117 */
118static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
119{
120 return bo->tbo.addr_space_offset;
121}
122
123static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
124 bool no_wait)
125{
126 int r;
127
128retry:
129 r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
130 if (unlikely(r != 0)) {
131 if (r == -ERESTART)
132 goto retry;
133 dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
134 return r;
135 }
136 spin_lock(&bo->tbo.lock);
137 if (mem_type)
138 *mem_type = bo->tbo.mem.mem_type;
139 if (bo->tbo.sync_obj)
140 r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
141 spin_unlock(&bo->tbo.lock);
142 ttm_bo_unreserve(&bo->tbo);
143 if (unlikely(r == -ERESTART))
144 goto retry;
145 return r;
146}
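
Unlike the helpers above, radeon_bo_wait() takes and drops the reservation itself, so it may be called on an unreserved BO. A hypothetical caller that blocks until outstanding GPU work on the buffer has signalled before touching it from the CPU (the name and debug output are illustrative):

	static int example_wait_idle(struct radeon_bo *bo)
	{
		u32 mem_type;
		int r;

		r = radeon_bo_wait(bo, &mem_type, false);	/* blocks on the sync_obj */
		if (r)
			return r;
		DRM_DEBUG("bo idle, mem_type %u (domain 0x%x)\n",
			  mem_type, radeon_mem_type_to_domain(mem_type));
		return 0;
	}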
45 147
148extern int radeon_bo_create(struct radeon_device *rdev,
149 struct drm_gem_object *gobj, unsigned long size,
150 bool kernel, u32 domain,
151 struct radeon_bo **bo_ptr);
152extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
153extern void radeon_bo_kunmap(struct radeon_bo *bo);
154extern void radeon_bo_unref(struct radeon_bo **bo);
155extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
156extern int radeon_bo_unpin(struct radeon_bo *bo);
157extern int radeon_bo_evict_vram(struct radeon_device *rdev);
158extern void radeon_bo_force_delete(struct radeon_device *rdev);
159extern int radeon_bo_init(struct radeon_device *rdev);
160extern void radeon_bo_fini(struct radeon_device *rdev);
161extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
162 struct list_head *head);
163extern int radeon_bo_list_reserve(struct list_head *head);
164extern void radeon_bo_list_unreserve(struct list_head *head);
165extern int radeon_bo_list_validate(struct list_head *head, void *fence);
166extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
167extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
168 struct vm_area_struct *vma);
169extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
170 u32 tiling_flags, u32 pitch);
171extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
172 u32 *tiling_flags, u32 *pitch);
173extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
174 bool force_drop);
175extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
176 struct ttm_mem_reg *mem);
177extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
178extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
46#endif 179#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 46146c6a2a06..34b08d307c81 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
27int radeon_pm_init(struct radeon_device *rdev) 27int radeon_pm_init(struct radeon_device *rdev)
28{ 28{
29 if (radeon_debugfs_pm_init(rdev)) { 29 if (radeon_debugfs_pm_init(rdev)) {
30 DRM_ERROR("Failed to register debugfs file for CP !\n"); 30 DRM_ERROR("Failed to register debugfs file for PM!\n");
31 } 31 }
32 32
33 return 0; 33 return 0;
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
44 struct drm_device *dev = node->minor->dev; 44 struct drm_device *dev = node->minor->dev;
45 struct radeon_device *rdev = dev->dev_private; 45 struct radeon_device *rdev = dev->dev_private;
46 46
47 seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev)); 47 seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
48 seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev)); 48 seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
49 49
50 return 0; 50 return 0;
51} 51}
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 29ab75903ec1..6d0a009dd4a1 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -887,6 +887,7 @@
887# define RADEON_FP_PANEL_FORMAT (1 << 3) 887# define RADEON_FP_PANEL_FORMAT (1 << 3)
888# define RADEON_FP_EN_TMDS (1 << 7) 888# define RADEON_FP_EN_TMDS (1 << 7)
889# define RADEON_FP_DETECT_SENSE (1 << 8) 889# define RADEON_FP_DETECT_SENSE (1 << 8)
890# define RADEON_FP_DETECT_INT_POL (1 << 9)
890# define R200_FP_SOURCE_SEL_MASK (3 << 10) 891# define R200_FP_SOURCE_SEL_MASK (3 << 10)
891# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10) 892# define R200_FP_SOURCE_SEL_CRTC1 (0 << 10)
892# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10) 893# define R200_FP_SOURCE_SEL_CRTC2 (1 << 10)
@@ -894,6 +895,7 @@
894# define R200_FP_SOURCE_SEL_TRANS (3 << 10) 895# define R200_FP_SOURCE_SEL_TRANS (3 << 10)
895# define RADEON_FP_SEL_CRTC1 (0 << 13) 896# define RADEON_FP_SEL_CRTC1 (0 << 13)
896# define RADEON_FP_SEL_CRTC2 (1 << 13) 897# define RADEON_FP_SEL_CRTC2 (1 << 13)
898# define R300_HPD_SEL(x) ((x) << 13)
897# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15) 899# define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
898# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16) 900# define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
899# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17) 901# define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
909# define RADEON_FP2_ON (1 << 2) 911# define RADEON_FP2_ON (1 << 2)
910# define RADEON_FP2_PANEL_FORMAT (1 << 3) 912# define RADEON_FP2_PANEL_FORMAT (1 << 3)
911# define RADEON_FP2_DETECT_SENSE (1 << 8) 913# define RADEON_FP2_DETECT_SENSE (1 << 8)
914# define RADEON_FP2_DETECT_INT_POL (1 << 9)
912# define R200_FP2_SOURCE_SEL_MASK (3 << 10) 915# define R200_FP2_SOURCE_SEL_MASK (3 << 10)
913# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10) 916# define R200_FP2_SOURCE_SEL_CRTC1 (0 << 10)
914# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10) 917# define R200_FP2_SOURCE_SEL_CRTC2 (1 << 10)
@@ -988,14 +991,20 @@
988 991
989#define RADEON_GEN_INT_CNTL 0x0040 992#define RADEON_GEN_INT_CNTL 0x0040
990# define RADEON_CRTC_VBLANK_MASK (1 << 0) 993# define RADEON_CRTC_VBLANK_MASK (1 << 0)
994# define RADEON_FP_DETECT_MASK (1 << 4)
991# define RADEON_CRTC2_VBLANK_MASK (1 << 9) 995# define RADEON_CRTC2_VBLANK_MASK (1 << 9)
996# define RADEON_FP2_DETECT_MASK (1 << 10)
992# define RADEON_SW_INT_ENABLE (1 << 25) 997# define RADEON_SW_INT_ENABLE (1 << 25)
993#define RADEON_GEN_INT_STATUS 0x0044 998#define RADEON_GEN_INT_STATUS 0x0044
994# define AVIVO_DISPLAY_INT_STATUS (1 << 0) 999# define AVIVO_DISPLAY_INT_STATUS (1 << 0)
995# define RADEON_CRTC_VBLANK_STAT (1 << 0) 1000# define RADEON_CRTC_VBLANK_STAT (1 << 0)
996# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) 1001# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0)
1002# define RADEON_FP_DETECT_STAT (1 << 4)
1003# define RADEON_FP_DETECT_STAT_ACK (1 << 4)
997# define RADEON_CRTC2_VBLANK_STAT (1 << 9) 1004# define RADEON_CRTC2_VBLANK_STAT (1 << 9)
998# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) 1005# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9)
1006# define RADEON_FP2_DETECT_STAT (1 << 10)
1007# define RADEON_FP2_DETECT_STAT_ACK (1 << 10)
999# define RADEON_SW_INT_FIRE (1 << 26) 1008# define RADEON_SW_INT_FIRE (1 << 26)
1000# define RADEON_SW_INT_TEST (1 << 25) 1009# define RADEON_SW_INT_TEST (1 << 25)
1001# define RADEON_SW_INT_TEST_ACK (1 << 25) 1010# define RADEON_SW_INT_TEST_ACK (1 << 25)
@@ -1051,20 +1060,25 @@
1051 1060
1052 /* Multimedia I2C bus */ 1061 /* Multimedia I2C bus */
1053#define RADEON_I2C_CNTL_0 0x0090 1062#define RADEON_I2C_CNTL_0 0x0090
1054#define RADEON_I2C_DONE (1<<0) 1063#define RADEON_I2C_DONE (1 << 0)
1055#define RADEON_I2C_NACK (1<<1) 1064#define RADEON_I2C_NACK (1 << 1)
1056#define RADEON_I2C_HALT (1<<2) 1065#define RADEON_I2C_HALT (1 << 2)
1057#define RADEON_I2C_SOFT_RST (1<<5) 1066#define RADEON_I2C_SOFT_RST (1 << 5)
1058#define RADEON_I2C_DRIVE_EN (1<<6) 1067#define RADEON_I2C_DRIVE_EN (1 << 6)
1059#define RADEON_I2C_DRIVE_SEL (1<<7) 1068#define RADEON_I2C_DRIVE_SEL (1 << 7)
1060#define RADEON_I2C_START (1<<8) 1069#define RADEON_I2C_START (1 << 8)
1061#define RADEON_I2C_STOP (1<<9) 1070#define RADEON_I2C_STOP (1 << 9)
1062#define RADEON_I2C_RECEIVE (1<<10) 1071#define RADEON_I2C_RECEIVE (1 << 10)
1063#define RADEON_I2C_ABORT (1<<11) 1072#define RADEON_I2C_ABORT (1 << 11)
1064#define RADEON_I2C_GO (1<<12) 1073#define RADEON_I2C_GO (1 << 12)
1074#define RADEON_I2C_PRESCALE_SHIFT 16
1065#define RADEON_I2C_CNTL_1 0x0094 1075#define RADEON_I2C_CNTL_1 0x0094
1066#define RADEON_I2C_SEL (1<<16) 1076#define RADEON_I2C_DATA_COUNT_SHIFT 0
1067#define RADEON_I2C_EN (1<<17) 1077#define RADEON_I2C_ADDR_COUNT_SHIFT 4
1078#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
1079#define RADEON_I2C_SEL (1 << 16)
1080#define RADEON_I2C_EN (1 << 17)
1081#define RADEON_I2C_TIME_LIMIT_SHIFT 24
1068#define RADEON_I2C_DATA 0x0098 1082#define RADEON_I2C_DATA 0x0098
1069 1083
1070#define RADEON_DVI_I2C_CNTL_0 0x02e0 1084#define RADEON_DVI_I2C_CNTL_0 0x02e0
@@ -1072,7 +1086,7 @@
1072# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */ 1086# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
1073# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */ 1087# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
1074# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */ 1088# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
1075#define RADEON_DVI_I2C_CNTL_1 0x02e4 /* ? */ 1089#define RADEON_DVI_I2C_CNTL_1 0x02e4
1076#define RADEON_DVI_I2C_DATA 0x02e8 1090#define RADEON_DVI_I2C_DATA 0x02e8
1077 1091
1078#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */ 1092#define RADEON_INTERRUPT_LINE 0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
1143# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13) 1157# define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
1144# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14) 1158# define RADEON_MC_MCLK_DYN_ENABLE (1 << 14)
1145# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15) 1159# define RADEON_IO_MCLK_DYN_ENABLE (1 << 15)
1146#define RADEON_LCD_GPIO_MASK 0x01a0 1160
1147#define RADEON_GPIOPAD_EN 0x01a0
1148#define RADEON_LCD_GPIO_Y_REG 0x01a4
1149#define RADEON_MDGPIO_A_REG 0x01ac
1150#define RADEON_MDGPIO_EN_REG 0x01b0
1151#define RADEON_MDGPIO_MASK 0x0198
1152#define RADEON_GPIOPAD_MASK 0x0198 1161#define RADEON_GPIOPAD_MASK 0x0198
1153#define RADEON_GPIOPAD_A 0x019c 1162#define RADEON_GPIOPAD_A 0x019c
1154#define RADEON_MDGPIO_Y_REG 0x01b4 1163#define RADEON_GPIOPAD_EN 0x01a0
1164#define RADEON_GPIOPAD_Y 0x01a4
1165#define RADEON_MDGPIO_MASK 0x01a8
1166#define RADEON_MDGPIO_A 0x01ac
1167#define RADEON_MDGPIO_EN 0x01b0
1168#define RADEON_MDGPIO_Y 0x01b4
1169
1155#define RADEON_MEM_ADDR_CONFIG 0x0148 1170#define RADEON_MEM_ADDR_CONFIG 0x0148
1156#define RADEON_MEM_BASE 0x0f10 /* PCI */ 1171#define RADEON_MEM_BASE 0x0f10 /* PCI */
1157#define RADEON_MEM_CNTL 0x0140 1172#define RADEON_MEM_CNTL 0x0140
@@ -1360,6 +1375,9 @@
1360#define RADEON_OVR_CLR 0x0230 1375#define RADEON_OVR_CLR 0x0230
1361#define RADEON_OVR_WID_LEFT_RIGHT 0x0234 1376#define RADEON_OVR_WID_LEFT_RIGHT 0x0234
1362#define RADEON_OVR_WID_TOP_BOTTOM 0x0238 1377#define RADEON_OVR_WID_TOP_BOTTOM 0x0238
1378#define RADEON_OVR2_CLR 0x0330
1379#define RADEON_OVR2_WID_LEFT_RIGHT 0x0334
1380#define RADEON_OVR2_WID_TOP_BOTTOM 0x0338
1363 1381
1364/* first capture unit */ 1382/* first capture unit */
1365 1383
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 747b4bffb84b..4d12b2d17b4d 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
165 return 0; 165 return 0;
166 /* Allocate 1M object buffer */ 166 /* Allocate 1M object buffer */
167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); 167 INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
168 r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, 168 r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
169 true, RADEON_GEM_DOMAIN_GTT, 169 true, RADEON_GEM_DOMAIN_GTT,
170 false, &rdev->ib_pool.robj); 170 &rdev->ib_pool.robj);
171 if (r) { 171 if (r) {
 172 DRM_ERROR("radeon: failed to create ib pool (%d).\n", r); 172 DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
173 return r; 173 return r;
174 } 174 }
175 r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr); 175 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
176 if (unlikely(r != 0))
177 return r;
178 r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
176 if (r) { 179 if (r) {
180 radeon_bo_unreserve(rdev->ib_pool.robj);
177 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r); 181 DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
178 return r; 182 return r;
179 } 183 }
180 r = radeon_object_kmap(rdev->ib_pool.robj, &ptr); 184 r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
185 radeon_bo_unreserve(rdev->ib_pool.robj);
181 if (r) { 186 if (r) {
 182 DRM_ERROR("radeon: failed to map ib pool (%d).\n", r); 187 DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
183 return r; 188 return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
203 208
204void radeon_ib_pool_fini(struct radeon_device *rdev) 209void radeon_ib_pool_fini(struct radeon_device *rdev)
205{ 210{
211 int r;
212
206 if (!rdev->ib_pool.ready) { 213 if (!rdev->ib_pool.ready) {
207 return; 214 return;
208 } 215 }
209 mutex_lock(&rdev->ib_pool.mutex); 216 mutex_lock(&rdev->ib_pool.mutex);
210 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); 217 bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
211 if (rdev->ib_pool.robj) { 218 if (rdev->ib_pool.robj) {
212 radeon_object_kunmap(rdev->ib_pool.robj); 219 r = radeon_bo_reserve(rdev->ib_pool.robj, false);
213 radeon_object_unref(&rdev->ib_pool.robj); 220 if (likely(r == 0)) {
221 radeon_bo_kunmap(rdev->ib_pool.robj);
222 radeon_bo_unpin(rdev->ib_pool.robj);
223 radeon_bo_unreserve(rdev->ib_pool.robj);
224 }
225 radeon_bo_unref(&rdev->ib_pool.robj);
214 rdev->ib_pool.robj = NULL; 226 rdev->ib_pool.robj = NULL;
215 } 227 }
216 mutex_unlock(&rdev->ib_pool.mutex); 228 mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
288 rdev->cp.ring_size = ring_size; 300 rdev->cp.ring_size = ring_size;
289 /* Allocate ring buffer */ 301 /* Allocate ring buffer */
290 if (rdev->cp.ring_obj == NULL) { 302 if (rdev->cp.ring_obj == NULL) {
291 r = radeon_object_create(rdev, NULL, rdev->cp.ring_size, 303 r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
292 true, 304 RADEON_GEM_DOMAIN_GTT,
293 RADEON_GEM_DOMAIN_GTT, 305 &rdev->cp.ring_obj);
294 false,
295 &rdev->cp.ring_obj);
296 if (r) { 306 if (r) {
297 DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r); 307 dev_err(rdev->dev, "(%d) ring create failed\n", r);
298 mutex_unlock(&rdev->cp.mutex);
299 return r; 308 return r;
300 } 309 }
301 r = radeon_object_pin(rdev->cp.ring_obj, 310 r = radeon_bo_reserve(rdev->cp.ring_obj, false);
302 RADEON_GEM_DOMAIN_GTT, 311 if (unlikely(r != 0))
303 &rdev->cp.gpu_addr); 312 return r;
313 r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
314 &rdev->cp.gpu_addr);
304 if (r) { 315 if (r) {
305 DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r); 316 radeon_bo_unreserve(rdev->cp.ring_obj);
306 mutex_unlock(&rdev->cp.mutex); 317 dev_err(rdev->dev, "(%d) ring pin failed\n", r);
307 return r; 318 return r;
308 } 319 }
309 r = radeon_object_kmap(rdev->cp.ring_obj, 320 r = radeon_bo_kmap(rdev->cp.ring_obj,
310 (void **)&rdev->cp.ring); 321 (void **)&rdev->cp.ring);
322 radeon_bo_unreserve(rdev->cp.ring_obj);
311 if (r) { 323 if (r) {
312 DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); 324 dev_err(rdev->dev, "(%d) ring map failed\n", r);
313 mutex_unlock(&rdev->cp.mutex);
314 return r; 325 return r;
315 } 326 }
316 } 327 }
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
321 332
322void radeon_ring_fini(struct radeon_device *rdev) 333void radeon_ring_fini(struct radeon_device *rdev)
323{ 334{
335 int r;
336
324 mutex_lock(&rdev->cp.mutex); 337 mutex_lock(&rdev->cp.mutex);
325 if (rdev->cp.ring_obj) { 338 if (rdev->cp.ring_obj) {
326 radeon_object_kunmap(rdev->cp.ring_obj); 339 r = radeon_bo_reserve(rdev->cp.ring_obj, false);
327 radeon_object_unpin(rdev->cp.ring_obj); 340 if (likely(r == 0)) {
328 radeon_object_unref(&rdev->cp.ring_obj); 341 radeon_bo_kunmap(rdev->cp.ring_obj);
342 radeon_bo_unpin(rdev->cp.ring_obj);
343 radeon_bo_unreserve(rdev->cp.ring_obj);
344 }
345 radeon_bo_unref(&rdev->cp.ring_obj);
329 rdev->cp.ring = NULL; 346 rdev->cp.ring = NULL;
330 rdev->cp.ring_obj = NULL; 347 rdev->cp.ring_obj = NULL;
331 } 348 }
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index f8a465d9a1cf..391c973ec4db 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -30,8 +30,8 @@
30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */ 30/* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
31void radeon_test_moves(struct radeon_device *rdev) 31void radeon_test_moves(struct radeon_device *rdev)
32{ 32{
33 struct radeon_object *vram_obj = NULL; 33 struct radeon_bo *vram_obj = NULL;
34 struct radeon_object **gtt_obj = NULL; 34 struct radeon_bo **gtt_obj = NULL;
35 struct radeon_fence *fence = NULL; 35 struct radeon_fence *fence = NULL;
36 uint64_t gtt_addr, vram_addr; 36 uint64_t gtt_addr, vram_addr;
37 unsigned i, n, size; 37 unsigned i, n, size;
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
52 goto out_cleanup; 52 goto out_cleanup;
53 } 53 }
54 54
55 r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM, 55 r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
56 false, &vram_obj); 56 &vram_obj);
57 if (r) { 57 if (r) {
58 DRM_ERROR("Failed to create VRAM object\n"); 58 DRM_ERROR("Failed to create VRAM object\n");
59 goto out_cleanup; 59 goto out_cleanup;
60 } 60 }
61 61 r = radeon_bo_reserve(vram_obj, false);
62 r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr); 62 if (unlikely(r != 0))
63 goto out_cleanup;
64 r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
63 if (r) { 65 if (r) {
64 DRM_ERROR("Failed to pin VRAM object\n"); 66 DRM_ERROR("Failed to pin VRAM object\n");
65 goto out_cleanup; 67 goto out_cleanup;
66 } 68 }
67
68 for (i = 0; i < n; i++) { 69 for (i = 0; i < n; i++) {
69 void *gtt_map, *vram_map; 70 void *gtt_map, *vram_map;
70 void **gtt_start, **gtt_end; 71 void **gtt_start, **gtt_end;
71 void **vram_start, **vram_end; 72 void **vram_start, **vram_end;
72 73
73 r = radeon_object_create(rdev, NULL, size, true, 74 r = radeon_bo_create(rdev, NULL, size, true,
74 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i); 75 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
75 if (r) { 76 if (r) {
76 DRM_ERROR("Failed to create GTT object %d\n", i); 77 DRM_ERROR("Failed to create GTT object %d\n", i);
77 goto out_cleanup; 78 goto out_cleanup;
78 } 79 }
79 80
80 r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr); 81 r = radeon_bo_reserve(gtt_obj[i], false);
82 if (unlikely(r != 0))
83 goto out_cleanup;
84 r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
81 if (r) { 85 if (r) {
82 DRM_ERROR("Failed to pin GTT object %d\n", i); 86 DRM_ERROR("Failed to pin GTT object %d\n", i);
83 goto out_cleanup; 87 goto out_cleanup;
84 } 88 }
85 89
86 r = radeon_object_kmap(gtt_obj[i], &gtt_map); 90 r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
87 if (r) { 91 if (r) {
88 DRM_ERROR("Failed to map GTT object %d\n", i); 92 DRM_ERROR("Failed to map GTT object %d\n", i);
89 goto out_cleanup; 93 goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
94 gtt_start++) 98 gtt_start++)
95 *gtt_start = gtt_start; 99 *gtt_start = gtt_start;
96 100
97 radeon_object_kunmap(gtt_obj[i]); 101 radeon_bo_kunmap(gtt_obj[i]);
98 102
99 r = radeon_fence_create(rdev, &fence); 103 r = radeon_fence_create(rdev, &fence);
100 if (r) { 104 if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
116 120
117 radeon_fence_unref(&fence); 121 radeon_fence_unref(&fence);
118 122
119 r = radeon_object_kmap(vram_obj, &vram_map); 123 r = radeon_bo_kmap(vram_obj, &vram_map);
120 if (r) { 124 if (r) {
121 DRM_ERROR("Failed to map VRAM object after copy %d\n", i); 125 DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
122 goto out_cleanup; 126 goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
131 "expected 0x%p (GTT map 0x%p-0x%p)\n", 135 "expected 0x%p (GTT map 0x%p-0x%p)\n",
132 i, *vram_start, gtt_start, gtt_map, 136 i, *vram_start, gtt_start, gtt_map,
133 gtt_end); 137 gtt_end);
134 radeon_object_kunmap(vram_obj); 138 radeon_bo_kunmap(vram_obj);
135 goto out_cleanup; 139 goto out_cleanup;
136 } 140 }
137 *vram_start = vram_start; 141 *vram_start = vram_start;
138 } 142 }
139 143
140 radeon_object_kunmap(vram_obj); 144 radeon_bo_kunmap(vram_obj);
141 145
142 r = radeon_fence_create(rdev, &fence); 146 r = radeon_fence_create(rdev, &fence);
143 if (r) { 147 if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
159 163
160 radeon_fence_unref(&fence); 164 radeon_fence_unref(&fence);
161 165
162 r = radeon_object_kmap(gtt_obj[i], &gtt_map); 166 r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
163 if (r) { 167 if (r) {
164 DRM_ERROR("Failed to map GTT object after copy %d\n", i); 168 DRM_ERROR("Failed to map GTT object after copy %d\n", i);
165 goto out_cleanup; 169 goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
174 "expected 0x%p (VRAM map 0x%p-0x%p)\n", 178 "expected 0x%p (VRAM map 0x%p-0x%p)\n",
175 i, *gtt_start, vram_start, vram_map, 179 i, *gtt_start, vram_start, vram_map,
176 vram_end); 180 vram_end);
177 radeon_object_kunmap(gtt_obj[i]); 181 radeon_bo_kunmap(gtt_obj[i]);
178 goto out_cleanup; 182 goto out_cleanup;
179 } 183 }
180 } 184 }
181 185
182 radeon_object_kunmap(gtt_obj[i]); 186 radeon_bo_kunmap(gtt_obj[i]);
183 187
184 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n", 188 DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
185 gtt_addr - rdev->mc.gtt_location); 189 gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
187 191
188out_cleanup: 192out_cleanup:
189 if (vram_obj) { 193 if (vram_obj) {
190 radeon_object_unpin(vram_obj); 194 if (radeon_bo_is_reserved(vram_obj)) {
191 radeon_object_unref(&vram_obj); 195 radeon_bo_unpin(vram_obj);
196 radeon_bo_unreserve(vram_obj);
197 }
198 radeon_bo_unref(&vram_obj);
192 } 199 }
193 if (gtt_obj) { 200 if (gtt_obj) {
194 for (i = 0; i < n; i++) { 201 for (i = 0; i < n; i++) {
195 if (gtt_obj[i]) { 202 if (gtt_obj[i]) {
196 radeon_object_unpin(gtt_obj[i]); 203 if (radeon_bo_is_reserved(gtt_obj[i])) {
197 radeon_object_unref(&gtt_obj[i]); 204 radeon_bo_unpin(gtt_obj[i]);
205 radeon_bo_unreserve(gtt_obj[i]);
206 }
207 radeon_bo_unref(&gtt_obj[i]);
198 } 208 }
199 } 209 }
200 kfree(gtt_obj); 210 kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
206 printk(KERN_WARNING "Error while testing BO move.\n"); 216 printk(KERN_WARNING "Error while testing BO move.\n");
207 } 217 }
208} 218}
209
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index eda4ade24c3a..5a19d529d1c0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
150 man->default_caching = TTM_PL_FLAG_CACHED; 150 man->default_caching = TTM_PL_FLAG_CACHED;
151 break; 151 break;
152 case TTM_PL_TT: 152 case TTM_PL_TT:
153 man->gpu_offset = 0; 153 man->gpu_offset = rdev->mc.gtt_location;
154 man->available_caching = TTM_PL_MASK_CACHING; 154 man->available_caching = TTM_PL_MASK_CACHING;
155 man->default_caching = TTM_PL_FLAG_CACHED; 155 man->default_caching = TTM_PL_FLAG_CACHED;
156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; 156 man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
180 break; 180 break;
181 case TTM_PL_VRAM: 181 case TTM_PL_VRAM:
182 /* "On-card" video ram */ 182 /* "On-card" video ram */
183 man->gpu_offset = 0; 183 man->gpu_offset = rdev->mc.vram_location;
184 man->flags = TTM_MEMTYPE_FLAG_FIXED | 184 man->flags = TTM_MEMTYPE_FLAG_FIXED |
185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | 185 TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
186 TTM_MEMTYPE_FLAG_MAPPABLE; 186 TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
197 return 0; 197 return 0;
198} 198}
199 199
200static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo) 200static void radeon_evict_flags(struct ttm_buffer_object *bo,
201 struct ttm_placement *placement)
201{ 202{
202 uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE; 203 struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
203
204 switch (bo->mem.mem_type) { 204 switch (bo->mem.mem_type) {
205 case TTM_PL_VRAM:
206 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
207 break;
208 case TTM_PL_TT:
205 default: 209 default:
206 return (cur_placement & ~TTM_PL_MASK_CACHING) | 210 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
207 TTM_PL_FLAG_SYSTEM |
208 TTM_PL_FLAG_CACHED;
209 } 211 }
212 *placement = rbo->placement;
210} 213}
211 214
212static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) 215static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
283 struct radeon_device *rdev; 286 struct radeon_device *rdev;
284 struct ttm_mem_reg *old_mem = &bo->mem; 287 struct ttm_mem_reg *old_mem = &bo->mem;
285 struct ttm_mem_reg tmp_mem; 288 struct ttm_mem_reg tmp_mem;
286 uint32_t proposed_placement; 289 u32 placements;
290 struct ttm_placement placement;
287 int r; 291 int r;
288 292
289 rdev = radeon_get_rdev(bo->bdev); 293 rdev = radeon_get_rdev(bo->bdev);
290 tmp_mem = *new_mem; 294 tmp_mem = *new_mem;
291 tmp_mem.mm_node = NULL; 295 tmp_mem.mm_node = NULL;
292 proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 296 placement.fpfn = 0;
293 r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem, 297 placement.lpfn = 0;
298 placement.num_placement = 1;
299 placement.placement = &placements;
300 placement.num_busy_placement = 1;
301 placement.busy_placement = &placements;
302 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
303 r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
294 interruptible, no_wait); 304 interruptible, no_wait);
295 if (unlikely(r)) { 305 if (unlikely(r)) {
296 return r; 306 return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
329 struct radeon_device *rdev; 339 struct radeon_device *rdev;
330 struct ttm_mem_reg *old_mem = &bo->mem; 340 struct ttm_mem_reg *old_mem = &bo->mem;
331 struct ttm_mem_reg tmp_mem; 341 struct ttm_mem_reg tmp_mem;
332 uint32_t proposed_flags; 342 struct ttm_placement placement;
343 u32 placements;
333 int r; 344 int r;
334 345
335 rdev = radeon_get_rdev(bo->bdev); 346 rdev = radeon_get_rdev(bo->bdev);
336 tmp_mem = *new_mem; 347 tmp_mem = *new_mem;
337 tmp_mem.mm_node = NULL; 348 tmp_mem.mm_node = NULL;
338 proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; 349 placement.fpfn = 0;
339 r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem, 350 placement.lpfn = 0;
340 interruptible, no_wait); 351 placement.num_placement = 1;
352 placement.placement = &placements;
353 placement.num_busy_placement = 1;
354 placement.busy_placement = &placements;
355 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
356 r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
341 if (unlikely(r)) { 357 if (unlikely(r)) {
342 return r; 358 return r;
343 } 359 }
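
Both move helpers now describe the intermediate GTT placement with a full struct ttm_placement rather than a bare flags word, matching the new ttm_bo_mem_space() signature. The repeated six-line stanza could be factored into a small helper along these lines (a sketch under the same assumptions as the hunks above, not part of the patch):

	static void example_gtt_placement(struct ttm_placement *placement,
					  u32 *placements)
	{
		/* a single placement: GTT, any caching, no pfn range limit */
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->num_placement = 1;
		placement->placement = placements;
		placement->num_busy_placement = 1;
		placement->busy_placement = placements;
		*placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	}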
@@ -407,18 +423,6 @@ memcpy:
407 return r; 423 return r;
408} 424}
409 425
410const uint32_t radeon_mem_prios[] = {
411 TTM_PL_VRAM,
412 TTM_PL_TT,
413 TTM_PL_SYSTEM,
414};
415
416const uint32_t radeon_busy_prios[] = {
417 TTM_PL_TT,
418 TTM_PL_VRAM,
419 TTM_PL_SYSTEM,
420};
421
422static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg, 426static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
423 bool lazy, bool interruptible) 427 bool lazy, bool interruptible)
424{ 428{
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
446} 450}
447 451
448static struct ttm_bo_driver radeon_bo_driver = { 452static struct ttm_bo_driver radeon_bo_driver = {
449 .mem_type_prio = radeon_mem_prios,
450 .mem_busy_prio = radeon_busy_prios,
451 .num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
452 .num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
453 .create_ttm_backend_entry = &radeon_create_ttm_backend_entry, 453 .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
454 .invalidate_caches = &radeon_invalidate_caches, 454 .invalidate_caches = &radeon_invalidate_caches,
455 .init_mem_type = &radeon_init_mem_type, 455 .init_mem_type = &radeon_init_mem_type,
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
482 DRM_ERROR("failed initializing buffer object driver(%d).\n", r); 482 DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
483 return r; 483 return r;
484 } 484 }
485 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0, 485 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
486 ((rdev->mc.real_vram_size) >> PAGE_SHIFT)); 486 rdev->mc.real_vram_size >> PAGE_SHIFT);
487 if (r) { 487 if (r) {
488 DRM_ERROR("Failed initializing VRAM heap.\n"); 488 DRM_ERROR("Failed initializing VRAM heap.\n");
489 return r; 489 return r;
490 } 490 }
491 r = radeon_object_create(rdev, NULL, 256 * 1024, true, 491 r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
492 RADEON_GEM_DOMAIN_VRAM, false, 492 RADEON_GEM_DOMAIN_VRAM,
493 &rdev->stollen_vga_memory); 493 &rdev->stollen_vga_memory);
494 if (r) { 494 if (r) {
495 return r; 495 return r;
496 } 496 }
497 r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL); 497 r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
498 if (r)
499 return r;
500 r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
501 radeon_bo_unreserve(rdev->stollen_vga_memory);
498 if (r) { 502 if (r) {
499 radeon_object_unref(&rdev->stollen_vga_memory); 503 radeon_bo_unref(&rdev->stollen_vga_memory);
500 return r; 504 return r;
501 } 505 }
502 DRM_INFO("radeon: %uM of VRAM memory ready\n", 506 DRM_INFO("radeon: %uM of VRAM memory ready\n",
503 (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); 507 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
504 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0, 508 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
505 ((rdev->mc.gtt_size) >> PAGE_SHIFT)); 509 rdev->mc.gtt_size >> PAGE_SHIFT);
506 if (r) { 510 if (r) {
507 DRM_ERROR("Failed initializing GTT heap.\n"); 511 DRM_ERROR("Failed initializing GTT heap.\n");
508 return r; 512 return r;
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
523 527
524void radeon_ttm_fini(struct radeon_device *rdev) 528void radeon_ttm_fini(struct radeon_device *rdev)
525{ 529{
530 int r;
531
526 if (rdev->stollen_vga_memory) { 532 if (rdev->stollen_vga_memory) {
527 radeon_object_unpin(rdev->stollen_vga_memory); 533 r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
528 radeon_object_unref(&rdev->stollen_vga_memory); 534 if (r == 0) {
535 radeon_bo_unpin(rdev->stollen_vga_memory);
536 radeon_bo_unreserve(rdev->stollen_vga_memory);
537 }
538 radeon_bo_unref(&rdev->stollen_vga_memory);
529 } 539 }
530 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM); 540 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
531 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT); 541 ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index ca037160a582..c1fcdddb6be6 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
352 u32 tmp; 352 u32 tmp;
353 353
354 /* Setup GPU memory space */ 354 /* Setup GPU memory space */
355 tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); 355 tmp = RREG32(R_00015C_NB_TOM);
356 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; 356 rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
357 rdev->mc.gtt_location = 0xFFFFFFFFUL; 357 rdev->mc.gtt_location = 0xFFFFFFFFUL;
358 r = radeon_mc_setup(rdev); 358 r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
387 r300_clock_startup(rdev); 387 r300_clock_startup(rdev);
388 /* Initialize GPU configuration (# pipes, ...) */ 388 /* Initialize GPU configuration (# pipes, ...) */
389 rs400_gpu_init(rdev); 389 rs400_gpu_init(rdev);
390 r100_enable_bm(rdev);
390 /* Initialize GART (initialize after TTM so we can allocate 391 /* Initialize GART (initialize after TTM so we can allocate
391 * memory through TTM but finalize after TTM) */ 392 * memory through TTM but finalize after TTM) */
392 r = rs400_gart_enable(rdev); 393 r = rs400_gart_enable(rdev);
393 if (r) 394 if (r)
394 return r; 395 return r;
395 /* Enable IRQ */ 396 /* Enable IRQ */
396 rdev->irq.sw_int = true;
397 r100_irq_set(rdev); 397 r100_irq_set(rdev);
398 /* 1M ring buffer */ 398 /* 1M ring buffer */
399 r = r100_cp_init(rdev, 1024 * 1024); 399 r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
430 radeon_combios_asic_init(rdev->ddev); 430 radeon_combios_asic_init(rdev->ddev);
431 /* Resume clock after posting */ 431 /* Resume clock after posting */
432 r300_clock_startup(rdev); 432 r300_clock_startup(rdev);
433 /* Initialize surface registers */
434 radeon_surface_init(rdev);
433 return rs400_startup(rdev); 435 return rs400_startup(rdev);
434} 436}
435 437
@@ -452,7 +454,7 @@ void rs400_fini(struct radeon_device *rdev)
452 rs400_gart_fini(rdev); 454 rs400_gart_fini(rdev);
453 radeon_irq_kms_fini(rdev); 455 radeon_irq_kms_fini(rdev);
454 radeon_fence_driver_fini(rdev); 456 radeon_fence_driver_fini(rdev);
455 radeon_object_fini(rdev); 457 radeon_bo_fini(rdev);
456 radeon_atombios_fini(rdev); 458 radeon_atombios_fini(rdev);
457 kfree(rdev->bios); 459 kfree(rdev->bios);
458 rdev->bios = NULL; 460 rdev->bios = NULL;
@@ -490,10 +492,9 @@ int rs400_init(struct radeon_device *rdev)
490 RREG32(R_0007C0_CP_STAT)); 492 RREG32(R_0007C0_CP_STAT));
491 } 493 }
492 /* check if cards are posted or not */ 494 /* check if cards are posted or not */
493 if (!radeon_card_posted(rdev) && rdev->bios) { 495 if (radeon_boot_test_post_card(rdev) == false)
494 DRM_INFO("GPU not posted. posting now...\n"); 496 return -EINVAL;
495 radeon_combios_asic_init(rdev->ddev); 497
496 }
497 /* Initialize clocks */ 498 /* Initialize clocks */
498 radeon_get_clock_info(rdev->ddev); 499 radeon_get_clock_info(rdev->ddev);
 499 /* Get vram information */ 500 /* Get vram information */
@@ -510,7 +511,7 @@ int rs400_init(struct radeon_device *rdev)
510 if (r) 511 if (r)
511 return r; 512 return r;
512 /* Memory manager */ 513 /* Memory manager */
513 r = radeon_object_init(rdev); 514 r = radeon_bo_init(rdev);
514 if (r) 515 if (r)
515 return r; 516 return r;
516 r = rs400_gart_init(rdev); 517 r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f117cd8736a..4f8ea4260572 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,6 +45,122 @@
45void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
46int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
47 47
48int rs600_mc_init(struct radeon_device *rdev)
49{
50 /* read back the MC value from the hw */
51 int r;
52 u32 tmp;
53
54 /* Setup GPU memory space */
55 tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
56 rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
57 rdev->mc.gtt_location = 0xffffffffUL;
58 r = radeon_mc_setup(rdev);
59 if (r)
60 return r;
61 return 0;
62}
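
The << 16 implies the MC_FB_START field is expressed in 64 KiB units; a field value of 0x0010, for example, places the VRAM aperture at byte address 0x0010 << 16 = 0x00100000 (1 MiB). The 0xffffffffUL written to gtt_location appears to be a sentinel telling radeon_mc_setup() to choose a GTT window itself, the same convention the rs400 hunk above uses.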
63
64/* hpd for digital panel detect/disconnect */
65bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
66{
67 u32 tmp;
68 bool connected = false;
69
70 switch (hpd) {
71 case RADEON_HPD_1:
72 tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
73 if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
74 connected = true;
75 break;
76 case RADEON_HPD_2:
77 tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
78 if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
79 connected = true;
80 break;
81 default:
82 break;
83 }
84 return connected;
85}
86
87void rs600_hpd_set_polarity(struct radeon_device *rdev,
88 enum radeon_hpd_id hpd)
89{
90 u32 tmp;
91 bool connected = rs600_hpd_sense(rdev, hpd);
92
93 switch (hpd) {
94 case RADEON_HPD_1:
95 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
96 if (connected)
97 tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
98 else
99 tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
100 WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
101 break;
102 case RADEON_HPD_2:
103 tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
104 if (connected)
105 tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
106 else
107 tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
108 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
109 break;
110 default:
111 break;
112 }
113}
114
115void rs600_hpd_init(struct radeon_device *rdev)
116{
117 struct drm_device *dev = rdev->ddev;
118 struct drm_connector *connector;
119
120 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
121 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
122 switch (radeon_connector->hpd.hpd) {
123 case RADEON_HPD_1:
124 WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
125 S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
126 rdev->irq.hpd[0] = true;
127 break;
128 case RADEON_HPD_2:
129 WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
130 S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
131 rdev->irq.hpd[1] = true;
132 break;
133 default:
134 break;
135 }
136 }
137 rs600_irq_set(rdev);
138}
139
140void rs600_hpd_fini(struct radeon_device *rdev)
141{
142 struct drm_device *dev = rdev->ddev;
143 struct drm_connector *connector;
144
145 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
146 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
147 switch (radeon_connector->hpd.hpd) {
148 case RADEON_HPD_1:
149 WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
150 S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
151 rdev->irq.hpd[0] = false;
152 break;
153 case RADEON_HPD_2:
154 WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
155 S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
156 rdev->irq.hpd[1] = false;
157 break;
158 default:
159 break;
160 }
161 }
162}
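
Together these helpers implement the usual hot-plug round trip: rs600_hpd_init() enables the detect blocks and flags the interrupts for rs600_irq_set() to unmask, the interrupt handler below queues hotplug work when a line fires, and the work handler is then expected to re-sense the line and flip the polarity so the opposite transition interrupts next. A sketch of that per-connector step (a hypothetical consumer, not part of this patch):

	static void example_hpd_handle(struct radeon_device *rdev,
				       enum radeon_hpd_id hpd)
	{
		bool connected = rs600_hpd_sense(rdev, hpd);

		/* re-arm the detect block for the opposite transition */
		rs600_hpd_set_polarity(rdev, hpd);
		DRM_DEBUG("hpd %d %s\n", hpd,
			  connected ? "connected" : "disconnected");
	}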
163
48/* 164/*
49 * GART. 165 * GART.
50 */ 166 */
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
100 WREG32(R_00004C_BUS_CNTL, tmp); 216 WREG32(R_00004C_BUS_CNTL, tmp);
101 /* FIXME: setup default page */ 217 /* FIXME: setup default page */
102 WREG32_MC(R_000100_MC_PT0_CNTL, 218 WREG32_MC(R_000100_MC_PT0_CNTL,
103 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | 219 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
104 S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); 220 S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
221
105 for (i = 0; i < 19; i++) { 222 for (i = 0; i < 19; i++) {
106 WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, 223 WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
107 S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | 224 S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
108 S_00016C_SYSTEM_ACCESS_MODE_MASK( 225 S_00016C_SYSTEM_ACCESS_MODE_MASK(
109 V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | 226 V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
110 S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( 227 S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
111 V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | 228 V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
112 S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | 229 S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
113 S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | 230 S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
114 S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); 231 S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
115 } 232 }
116
117 /* System context map to GART space */
118 WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
119 WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
120
121 /* enable first context */ 233 /* enable first context */
122 WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
123 WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
124 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, 234 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
125 S_000102_ENABLE_PAGE_TABLE(1) | 235 S_000102_ENABLE_PAGE_TABLE(1) |
126 S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); 236 S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
237
127 /* disable all other contexts */ 238 /* disable all other contexts */
128 for (i = 1; i < 8; i++) { 239 for (i = 1; i < 8; i++)
129 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); 240 WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
130 }
131 241
132 /* setup the page table */ 242 /* setup the page table */
133 WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, 243 WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
134 rdev->gart.table_addr); 244 rdev->gart.table_addr);
245 WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
246 WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
135 WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); 247 WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
136 248
249 /* System context maps to VRAM space */
250 WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
251 WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
252
137 /* enable page tables */ 253 /* enable page tables */
138 tmp = RREG32_MC(R_000100_MC_PT0_CNTL); 254 tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
139 WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); 255 WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
146 262
147void rs600_gart_disable(struct radeon_device *rdev) 263void rs600_gart_disable(struct radeon_device *rdev)
148{ 264{
149 uint32_t tmp; 265 u32 tmp;
266 int r;
150 267
151 /* FIXME: disable out of gart access */ 268 /* FIXME: disable out of gart access */
152 WREG32_MC(R_000100_MC_PT0_CNTL, 0); 269 WREG32_MC(R_000100_MC_PT0_CNTL, 0);
153 tmp = RREG32_MC(R_000009_MC_CNTL1); 270 tmp = RREG32_MC(R_000009_MC_CNTL1);
154 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); 271 WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
155 if (rdev->gart.table.vram.robj) { 272 if (rdev->gart.table.vram.robj) {
156 radeon_object_kunmap(rdev->gart.table.vram.robj); 273 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
157 radeon_object_unpin(rdev->gart.table.vram.robj); 274 if (r == 0) {
275 radeon_bo_kunmap(rdev->gart.table.vram.robj);
276 radeon_bo_unpin(rdev->gart.table.vram.robj);
277 radeon_bo_unreserve(rdev->gart.table.vram.robj);
278 }
158 } 279 }
159} 280}
160 281
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev)
189{ 310{
190 uint32_t tmp = 0; 311 uint32_t tmp = 0;
191 uint32_t mode_int = 0; 312 uint32_t mode_int = 0;
313 u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
314 ~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
315 u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
316 ~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
192 317
193 if (rdev->irq.sw_int) { 318 if (rdev->irq.sw_int) {
194 tmp |= S_000040_SW_INT_EN(1); 319 tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev)
199 if (rdev->irq.crtc_vblank_int[1]) { 324 if (rdev->irq.crtc_vblank_int[1]) {
200 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); 325 mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
201 } 326 }
327 if (rdev->irq.hpd[0]) {
328 hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
329 }
330 if (rdev->irq.hpd[1]) {
331 hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
332 }
202 WREG32(R_000040_GEN_INT_CNTL, tmp); 333 WREG32(R_000040_GEN_INT_CNTL, tmp);
203 WREG32(R_006540_DxMODE_INT_MASK, mode_int); 334 WREG32(R_006540_DxMODE_INT_MASK, mode_int);
335 WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
336 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
204 return 0; 337 return 0;
205} 338}
206 339
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
208{ 341{
209 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); 342 uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
210 uint32_t irq_mask = ~C_000044_SW_INT; 343 uint32_t irq_mask = ~C_000044_SW_INT;
344 u32 tmp;
211 345
212 if (G_000044_DISPLAY_INT_STAT(irqs)) { 346 if (G_000044_DISPLAY_INT_STAT(irqs)) {
213 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); 347 *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
219 WREG32(R_006D34_D2MODE_VBLANK_STATUS, 353 WREG32(R_006D34_D2MODE_VBLANK_STATUS,
220 S_006D34_D2MODE_VBLANK_ACK(1)); 354 S_006D34_D2MODE_VBLANK_ACK(1));
221 } 355 }
356 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
357 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
358 tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
359 WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
360 }
361 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
362 tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
363 tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
364 WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
365 }
222 } else { 366 } else {
223 *r500_disp_int = 0; 367 *r500_disp_int = 0;
224 } 368 }
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev)
244{ 388{
245 uint32_t status, msi_rearm; 389 uint32_t status, msi_rearm;
246 uint32_t r500_disp_int; 390 uint32_t r500_disp_int;
391 bool queue_hotplug = false;
247 392
248 status = rs600_irq_ack(rdev, &r500_disp_int); 393 status = rs600_irq_ack(rdev, &r500_disp_int);
249 if (!status && !r500_disp_int) { 394 if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev)
258 drm_handle_vblank(rdev->ddev, 0); 403 drm_handle_vblank(rdev->ddev, 0);
259 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) 404 if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
260 drm_handle_vblank(rdev->ddev, 1); 405 drm_handle_vblank(rdev->ddev, 1);
406 if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
407 queue_hotplug = true;
408 DRM_DEBUG("HPD1\n");
409 }
410 if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
411 queue_hotplug = true;
412 DRM_DEBUG("HPD2\n");
413 }
261 status = rs600_irq_ack(rdev, &r500_disp_int); 414 status = rs600_irq_ack(rdev, &r500_disp_int);
262 } 415 }
416 if (queue_hotplug)
417 queue_work(rdev->wq, &rdev->hotplug_work);
263 if (rdev->msi_enabled) { 418 if (rdev->msi_enabled) {
264 switch (rdev->family) { 419 switch (rdev->family) {
265 case CHIP_RS600: 420 case CHIP_RS600:
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
301 456
302void rs600_gpu_init(struct radeon_device *rdev) 457void rs600_gpu_init(struct radeon_device *rdev)
303{ 458{
304 /* FIXME: HDP same place on rs600 ? */
305 r100_hdp_reset(rdev); 459 r100_hdp_reset(rdev);
306 /* FIXME: is this correct ? */
307 r420_pipes_init(rdev); 460 r420_pipes_init(rdev);
308 /* Wait for mc idle */ 461 /* Wait for mc idle */
309 if (rs600_mc_wait_for_idle(rdev)) 462 if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
312 465
313void rs600_vram_info(struct radeon_device *rdev) 466void rs600_vram_info(struct radeon_device *rdev)
314{ 467{
315 /* FIXME: to do or is these values sane ? */
316 rdev->mc.vram_is_ddr = true; 468 rdev->mc.vram_is_ddr = true;
317 rdev->mc.vram_width = 128; 469 rdev->mc.vram_width = 128;
470
471 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
472 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
473
474 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
475 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
476
477 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
478 rdev->mc.mc_vram_size = rdev->mc.aper_size;
479
480 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
481 rdev->mc.real_vram_size = rdev->mc.aper_size;
318} 482}
319 483
320void rs600_bandwidth_update(struct radeon_device *rdev) 484void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev)
388 if (r) 552 if (r)
389 return r; 553 return r;
390 /* Enable IRQ */ 554 /* Enable IRQ */
391 rdev->irq.sw_int = true;
392 rs600_irq_set(rdev); 555 rs600_irq_set(rdev);
393 /* 1M ring buffer */ 556 /* 1M ring buffer */
394 r = r100_cp_init(rdev, 1024 * 1024); 557 r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
423 atom_asic_init(rdev->mode_info.atom_context); 586 atom_asic_init(rdev->mode_info.atom_context);
424 /* Resume clock after posting */ 587 /* Resume clock after posting */
425 rv515_clock_startup(rdev); 588 rv515_clock_startup(rdev);
589 /* Initialize surface registers */
590 radeon_surface_init(rdev);
426 return rs600_startup(rdev); 591 return rs600_startup(rdev);
427} 592}
428 593
@@ -445,7 +610,7 @@ void rs600_fini(struct radeon_device *rdev)
445 rs600_gart_fini(rdev); 610 rs600_gart_fini(rdev);
446 radeon_irq_kms_fini(rdev); 611 radeon_irq_kms_fini(rdev);
447 radeon_fence_driver_fini(rdev); 612 radeon_fence_driver_fini(rdev);
448 radeon_object_fini(rdev); 613 radeon_bo_fini(rdev);
449 radeon_atombios_fini(rdev); 614 radeon_atombios_fini(rdev);
450 kfree(rdev->bios); 615 kfree(rdev->bios);
451 rdev->bios = NULL; 616 rdev->bios = NULL;
@@ -482,10 +647,9 @@ int rs600_init(struct radeon_device *rdev)
482 RREG32(R_0007C0_CP_STAT)); 647 RREG32(R_0007C0_CP_STAT));
483 } 648 }
484 /* check if cards are posted or not */ 649 /* check if cards are posted or not */
485 if (!radeon_card_posted(rdev) && rdev->bios) { 650 if (!radeon_boot_test_post_card(rdev))
486 DRM_INFO("GPU not posted. posting now...\n"); 651 return -EINVAL;
487 atom_asic_init(rdev->mode_info.atom_context); 652
488 }
489 /* Initialize clocks */ 653 /* Initialize clocks */
490 radeon_get_clock_info(rdev->ddev); 654 radeon_get_clock_info(rdev->ddev);
491 /* Initialize power management */ 655 /* Initialize power management */
@@ -493,7 +657,7 @@ int rs600_init(struct radeon_device *rdev)
493 /* Get vram informations */ 657 /* Get vram informations */
494 rs600_vram_info(rdev); 658 rs600_vram_info(rdev);
495 /* Initialize memory controller (also test AGP) */ 659 /* Initialize memory controller (also test AGP) */
496 r = r420_mc_init(rdev); 660 r = rs600_mc_init(rdev);
497 if (r) 661 if (r)
498 return r; 662 return r;
499 rs600_debugfs(rdev); 663 rs600_debugfs(rdev);
@@ -505,7 +669,7 @@ int rs600_init(struct radeon_device *rdev)
505 if (r) 669 if (r)
506 return r; 670 return r;
507 /* Memory manager */ 671 /* Memory manager */
508 r = radeon_object_init(rdev); 672 r = radeon_bo_init(rdev);
509 if (r) 673 if (r)
510 return r; 674 return r;
511 r = rs600_gart_init(rdev); 675 r = rs600_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index 81308924859a..c1c8f5885cbb 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -30,27 +30,12 @@
30 30
31/* Registers */ 31/* Registers */
32#define R_000040_GEN_INT_CNTL 0x000040 32#define R_000040_GEN_INT_CNTL 0x000040
33#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) 33#define S_000040_SCRATCH_INT_MASK(x) (((x) & 0x1) << 18)
34#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) 34#define G_000040_SCRATCH_INT_MASK(x) (((x) >> 18) & 0x1)
35#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE 35#define C_000040_SCRATCH_INT_MASK 0xFFFBFFFF
36#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) 36#define S_000040_GUI_IDLE_MASK(x) (((x) & 0x1) << 19)
37#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) 37#define G_000040_GUI_IDLE_MASK(x) (((x) >> 19) & 0x1)
38#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF 38#define C_000040_GUI_IDLE_MASK 0xFFF7FFFF
39#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6)
40#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1)
41#define C_000040_CRTC2_VSYNC 0xFFFFFFBF
42#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7)
43#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1)
44#define C_000040_SNAPSHOT2 0xFFFFFF7F
45#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9)
46#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1)
47#define C_000040_CRTC2_VBLANK 0xFFFFFDFF
48#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10)
49#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1)
50#define C_000040_FP2_DETECT 0xFFFFFBFF
51#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11)
52#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1)
53#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF
54#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) 39#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13)
55#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) 40#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1)
56#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF 41#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF
@@ -370,7 +355,90 @@
370#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) 355#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5)
371#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) 356#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1)
372#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF 357#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF
373 358#define S_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 16)
359#define G_007EDC_DACA_AUTODETECT_INTERRUPT(x) (((x) >> 16) & 0x1)
360#define C_007EDC_DACA_AUTODETECT_INTERRUPT 0xFFFEFFFF
361#define S_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) & 0x1) << 17)
362#define G_007EDC_DACB_AUTODETECT_INTERRUPT(x) (((x) >> 17) & 0x1)
363#define C_007EDC_DACB_AUTODETECT_INTERRUPT 0xFFFDFFFF
364#define S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) & 0x1) << 18)
365#define G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x) (((x) >> 18) & 0x1)
366#define C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT 0xFFFBFFFF
367#define S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) & 0x1) << 19)
368#define G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x) (((x) >> 19) & 0x1)
369#define C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT 0xFFF7FFFF
370#define R_007828_DACA_AUTODETECT_CONTROL 0x007828
371#define S_007828_DACA_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
372#define G_007828_DACA_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
373#define C_007828_DACA_AUTODETECT_MODE 0xFFFFFFFC
374#define S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
375#define G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
376#define C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
377#define S_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
378#define G_007828_DACA_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
379#define C_007828_DACA_AUTODETECT_CHECK_MASK 0xFFFCFFFF
380#define R_007838_DACA_AUTODETECT_INT_CONTROL 0x007838
381#define S_007838_DACA_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
382#define C_007838_DACA_AUTODETECT_ACK 0xFFFFFFFE
383#define S_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
384#define G_007838_DACA_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
385#define C_007838_DACA_AUTODETECT_INT_ENABLE 0xFFFCFFFF
386#define R_007A28_DACB_AUTODETECT_CONTROL 0x007A28
387#define S_007A28_DACB_AUTODETECT_MODE(x) (((x) & 0x3) << 0)
388#define G_007A28_DACB_AUTODETECT_MODE(x) (((x) >> 0) & 0x3)
389#define C_007A28_DACB_AUTODETECT_MODE 0xFFFFFFFC
390#define S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
391#define G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
392#define C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER 0xFFFF00FF
393#define S_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) & 0x3) << 16)
394#define G_007A28_DACB_AUTODETECT_CHECK_MASK(x) (((x) >> 16) & 0x3)
395#define C_007A28_DACB_AUTODETECT_CHECK_MASK 0xFFFCFFFF
396#define R_007A38_DACB_AUTODETECT_INT_CONTROL 0x007A38
397#define S_007A38_DACB_AUTODETECT_ACK(x) (((x) & 0x1) << 0)
398#define C_007A38_DACB_AUTODETECT_ACK 0xFFFFFFFE
399#define S_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) & 0x1) << 16)
400#define G_007A38_DACB_AUTODETECT_INT_ENABLE(x) (((x) >> 16) & 0x1)
401#define C_007A38_DACB_AUTODETECT_INT_ENABLE 0xFFFCFFFF
402#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL 0x007D00
403#define S_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) & 0x1) << 0)
404#define G_007D00_DC_HOT_PLUG_DETECT1_EN(x) (((x) >> 0) & 0x1)
405#define C_007D00_DC_HOT_PLUG_DETECT1_EN 0xFFFFFFFE
406#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0x007D04
407#define S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) & 0x1) << 0)
408#define G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x) (((x) >> 0) & 0x1)
409#define C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS 0xFFFFFFFE
410#define S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) & 0x1) << 1)
411#define G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x) (((x) >> 1) & 0x1)
412#define C_007D04_DC_HOT_PLUG_DETECT1_SENSE 0xFFFFFFFD
413#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL 0x007D08
414#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x) (((x) & 0x1) << 0)
415#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK 0xFFFFFFFE
416#define S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
417#define G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
418#define C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY 0xFFFFFEFF
419#define S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) & 0x1) << 16)
420#define G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x) (((x) >> 16) & 0x1)
421#define C_007D08_DC_HOT_PLUG_DETECT1_INT_EN 0xFFFEFFFF
422#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL 0x007D10
423#define S_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) & 0x1) << 0)
424#define G_007D10_DC_HOT_PLUG_DETECT2_EN(x) (((x) >> 0) & 0x1)
425#define C_007D10_DC_HOT_PLUG_DETECT2_EN 0xFFFFFFFE
426#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0x007D14
427#define S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) & 0x1) << 0)
428#define G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x) (((x) >> 0) & 0x1)
429#define C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS 0xFFFFFFFE
430#define S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) & 0x1) << 1)
431#define G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x) (((x) >> 1) & 0x1)
432#define C_007D14_DC_HOT_PLUG_DETECT2_SENSE 0xFFFFFFFD
433#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL 0x007D18
434#define S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x) (((x) & 0x1) << 0)
435#define C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK 0xFFFFFFFE
436#define S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
437#define G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
438#define C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY 0xFFFFFEFF
439#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
440#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
441#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
374 442
375/* MC registers */ 443/* MC registers */
376#define R_000000_MC_STATUS 0x000000 444#define R_000000_MC_STATUS 0x000000
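The rs600d.h macros follow a fixed convention: for each field of register R, S_R_FIELD(x) shifts a value into place, G_R_FIELD(x) extracts it, and C_R_FIELD is the AND mask that clears it. A standalone check of that convention for the HPD1 ack bit used by rs600_irq_ack():

#include <assert.h>
#include <stdint.h>

#define S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)   (((x) & 0x1) << 0)
#define C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK      0xFFFFFFFE

int main(void)
{
	uint32_t reg = 0xDEADBEEF;

	/* set-field and clear-mask are complementary for a 1-bit field */
	assert((S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1) |
		C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK) == 0xFFFFFFFF);

	/* typical read-modify-write ack, as in rs600_irq_ack() */
	reg &= C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK;	/* clear field */
	reg |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);	/* set ack bit */
	assert(reg == 0xDEADBEEF);	/* bit 0 was already set */
	return 0;
}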
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 27547175cf93..1e22f52d6039 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
131 131
132void rs690_vram_info(struct radeon_device *rdev) 132void rs690_vram_info(struct radeon_device *rdev)
133{ 133{
134 uint32_t tmp;
135 fixed20_12 a; 134 fixed20_12 a;
136 135
137 rs400_gart_adjust_size(rdev); 136 rs400_gart_adjust_size(rdev);
138 /* DDR for all card after R300 & IGP */ 137
139 rdev->mc.vram_is_ddr = true; 138 rdev->mc.vram_is_ddr = true;
140 /* FIXME: is this correct for RS690/RS740 ? */ 139 rdev->mc.vram_width = 128;
141 tmp = RREG32(RADEON_MEM_CNTL); 140
142 if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
143 rdev->mc.vram_width = 128;
144 } else {
145 rdev->mc.vram_width = 64;
146 }
147 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 141 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
148 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 142 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
149 143
150 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); 144 rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
151 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); 145 rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
146
147 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
148 rdev->mc.mc_vram_size = rdev->mc.aper_size;
149
150 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
151 rdev->mc.real_vram_size = rdev->mc.aper_size;
152
152 rs690_pm_info(rdev); 153 rs690_pm_info(rdev);
153 /* FIXME: we should enforce default clock in case GPU is not in 154 /* FIXME: we should enforce default clock in case GPU is not in
154 * default setup 155 * default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
161 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); 162 rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
162} 163}
163 164
165static int rs690_mc_init(struct radeon_device *rdev)
166{
167 int r;
168 u32 tmp;
169
170 /* Setup GPU memory space */
171 tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
172 rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
173 rdev->mc.gtt_location = 0xFFFFFFFFUL;
174 r = radeon_mc_setup(rdev);
175 if (r)
176 return r;
177 return 0;
178}
179
164void rs690_line_buffer_adjust(struct radeon_device *rdev, 180void rs690_line_buffer_adjust(struct radeon_device *rdev,
165 struct drm_display_mode *mode1, 181 struct drm_display_mode *mode1,
166 struct drm_display_mode *mode2) 182 struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
244 260
245 b.full = rfixed_const(mode->crtc_hdisplay); 261 b.full = rfixed_const(mode->crtc_hdisplay);
246 c.full = rfixed_const(256); 262 c.full = rfixed_const(256);
247 a.full = rfixed_mul(wm->num_line_pair, b); 263 a.full = rfixed_div(b, c);
248 request_fifo_depth.full = rfixed_div(a, c); 264 request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
265 request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
249 if (a.full < rfixed_const(4)) { 266 if (a.full < rfixed_const(4)) {
250 wm->lb_request_fifo_depth = 4; 267 wm->lb_request_fifo_depth = 4;
251 } else { 268 } else {
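The reordered math above also changes rounding: the request FIFO depth is now computed as ceil(hdisplay / 256 * num_line_pair) instead of being truncated. A standalone model in 20.12 fixed point (the FIX_* helpers are local re-implementations in the spirit of radeon's fixed20_12 type, not the kernel's):

#include <assert.h>
#include <stdint.h>

typedef uint32_t fixed20_12;		/* 20 integer bits, 12 fraction bits */
#define FIX(x)        ((fixed20_12)((x) << 12))
#define FIX_DIV(a, b) ((fixed20_12)(((uint64_t)(a) << 12) / (b)))
#define FIX_MUL(a, b) ((fixed20_12)(((uint64_t)(a) * (b)) >> 12))
#define FIX_CEIL(a)   (((a) & 0xFFF) ? (((a) & ~0xFFFu) + 0x1000) : (a))
#define FIX_TRUNC(a)  ((a) >> 12)

int main(void)
{
	fixed20_12 b = FIX(1680);		/* crtc_hdisplay */
	fixed20_12 c = FIX(256);
	fixed20_12 a = FIX_DIV(b, c);		/* 6.5625 */
	fixed20_12 depth = FIX_MUL(a, FIX(1));	/* one line pair */

	depth = FIX_CEIL(depth);
	assert(FIX_TRUNC(depth) == 7);	/* truncation would have given 6 */
	return 0;
}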
@@ -374,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
374 a.full = rfixed_const(16); 391 a.full = rfixed_const(16);
375 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); 392 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
376 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); 393 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
394 wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
377 395
378 /* Determine estimated width */ 396 /* Determine estimated width */
379 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; 397 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
383 } else { 401 } else {
384 a.full = rfixed_const(16); 402 a.full = rfixed_const(16);
385 wm->priority_mark.full = rfixed_div(estimated_width, a); 403 wm->priority_mark.full = rfixed_div(estimated_width, a);
404 wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
386 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; 405 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
387 } 406 }
388} 407}
@@ -605,7 +624,6 @@ static int rs690_startup(struct radeon_device *rdev)
605 if (r) 624 if (r)
606 return r; 625 return r;
607 /* Enable IRQ */ 626 /* Enable IRQ */
608 rdev->irq.sw_int = true;
609 rs600_irq_set(rdev); 627 rs600_irq_set(rdev);
610 /* 1M ring buffer */ 628 /* 1M ring buffer */
611 r = r100_cp_init(rdev, 1024 * 1024); 629 r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
640 atom_asic_init(rdev->mode_info.atom_context); 658 atom_asic_init(rdev->mode_info.atom_context);
641 /* Resume clock after posting */ 659 /* Resume clock after posting */
642 rv515_clock_startup(rdev); 660 rv515_clock_startup(rdev);
661 /* Initialize surface registers */
662 radeon_surface_init(rdev);
643 return rs690_startup(rdev); 663 return rs690_startup(rdev);
644} 664}
645 665
@@ -662,7 +682,7 @@ void rs690_fini(struct radeon_device *rdev)
662 rs400_gart_fini(rdev); 682 rs400_gart_fini(rdev);
663 radeon_irq_kms_fini(rdev); 683 radeon_irq_kms_fini(rdev);
664 radeon_fence_driver_fini(rdev); 684 radeon_fence_driver_fini(rdev);
665 radeon_object_fini(rdev); 685 radeon_bo_fini(rdev);
666 radeon_atombios_fini(rdev); 686 radeon_atombios_fini(rdev);
667 kfree(rdev->bios); 687 kfree(rdev->bios);
668 rdev->bios = NULL; 688 rdev->bios = NULL;
@@ -700,10 +720,9 @@ int rs690_init(struct radeon_device *rdev)
700 RREG32(R_0007C0_CP_STAT)); 720 RREG32(R_0007C0_CP_STAT));
701 } 721 }
702 /* check if cards are posted or not */ 722 /* check if cards are posted or not */
703 if (!radeon_card_posted(rdev) && rdev->bios) { 723 if (!radeon_boot_test_post_card(rdev))
704 DRM_INFO("GPU not posted. posting now...\n"); 724 return -EINVAL;
705 atom_asic_init(rdev->mode_info.atom_context); 725
706 }
707 /* Initialize clocks */ 726 /* Initialize clocks */
708 radeon_get_clock_info(rdev->ddev); 727 radeon_get_clock_info(rdev->ddev);
709 /* Initialize power management */ 728 /* Initialize power management */
@@ -711,7 +730,7 @@ int rs690_init(struct radeon_device *rdev)
711 /* Get vram informations */ 730 /* Get vram informations */
712 rs690_vram_info(rdev); 731 rs690_vram_info(rdev);
713 /* Initialize memory controller (also test AGP) */ 732 /* Initialize memory controller (also test AGP) */
714 r = r420_mc_init(rdev); 733 r = rs690_mc_init(rdev);
715 if (r) 734 if (r)
716 return r; 735 return r;
717 rv515_debugfs(rdev); 736 rv515_debugfs(rdev);
@@ -723,7 +742,7 @@ int rs690_init(struct radeon_device *rdev)
723 if (r) 742 if (r)
724 return r; 743 return r;
725 /* Memory manager */ 744 /* Memory manager */
726 r = radeon_object_init(rdev); 745 r = radeon_bo_init(rdev);
727 if (r) 746 if (r)
728 return r; 747 return r;
729 r = rs400_gart_init(rdev); 748 r = rs400_gart_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index ba68c9fe90a1..59632a506b46 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
478 return r; 478 return r;
479 } 479 }
480 /* Enable IRQ */ 480 /* Enable IRQ */
481 rdev->irq.sw_int = true;
482 rs600_irq_set(rdev); 481 rs600_irq_set(rdev);
483 /* 1M ring buffer */ 482 /* 1M ring buffer */
484 r = r100_cp_init(rdev, 1024 * 1024); 483 r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
514 atom_asic_init(rdev->mode_info.atom_context); 513 atom_asic_init(rdev->mode_info.atom_context);
515 /* Resume clock after posting */ 514 /* Resume clock after posting */
516 rv515_clock_startup(rdev); 515 rv515_clock_startup(rdev);
516 /* Initialize surface registers */
517 radeon_surface_init(rdev);
517 return rv515_startup(rdev); 518 return rv515_startup(rdev);
518} 519}
519 520
@@ -540,11 +541,11 @@ void rv515_fini(struct radeon_device *rdev)
540 r100_wb_fini(rdev); 541 r100_wb_fini(rdev);
541 r100_ib_fini(rdev); 542 r100_ib_fini(rdev);
542 radeon_gem_fini(rdev); 543 radeon_gem_fini(rdev);
543 rv370_pcie_gart_fini(rdev); 544 rv370_pcie_gart_fini(rdev);
544 radeon_agp_fini(rdev); 545 radeon_agp_fini(rdev);
545 radeon_irq_kms_fini(rdev); 546 radeon_irq_kms_fini(rdev);
546 radeon_fence_driver_fini(rdev); 547 radeon_fence_driver_fini(rdev);
547 radeon_object_fini(rdev); 548 radeon_bo_fini(rdev);
548 radeon_atombios_fini(rdev); 549 radeon_atombios_fini(rdev);
549 kfree(rdev->bios); 550 kfree(rdev->bios);
550 rdev->bios = NULL; 551 rdev->bios = NULL;
@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
580 RREG32(R_0007C0_CP_STAT)); 581 RREG32(R_0007C0_CP_STAT));
581 } 582 }
582 /* check if cards are posted or not */ 583 /* check if cards are posted or not */
583 if (!radeon_card_posted(rdev) && rdev->bios) { 584 if (!radeon_boot_test_post_card(rdev))
584 DRM_INFO("GPU not posted. posting now...\n"); 585 return -EINVAL;
585 atom_asic_init(rdev->mode_info.atom_context);
586 }
587 /* Initialize clocks */ 586 /* Initialize clocks */
588 radeon_get_clock_info(rdev->ddev); 587 radeon_get_clock_info(rdev->ddev);
589 /* Initialize power management */ 588 /* Initialize power management */
@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
603 if (r) 602 if (r)
604 return r; 603 return r;
605 /* Memory manager */ 604 /* Memory manager */
606 r = radeon_object_init(rdev); 605 r = radeon_bo_init(rdev);
607 if (r) 606 if (r)
608 return r; 607 return r;
609 r = rv370_pcie_gart_init(rdev); 608 r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
892 891
893 b.full = rfixed_const(mode->crtc_hdisplay); 892 b.full = rfixed_const(mode->crtc_hdisplay);
894 c.full = rfixed_const(256); 893 c.full = rfixed_const(256);
895 a.full = rfixed_mul(wm->num_line_pair, b); 894 a.full = rfixed_div(b, c);
896 request_fifo_depth.full = rfixed_div(a, c); 895 request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
896 request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
897 if (a.full < rfixed_const(4)) { 897 if (a.full < rfixed_const(4)) {
898 wm->lb_request_fifo_depth = 4; 898 wm->lb_request_fifo_depth = 4;
899 } else { 899 } else {
@@ -995,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
995 a.full = rfixed_const(16); 995 a.full = rfixed_const(16);
996 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); 996 wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
997 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); 997 wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
998 wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
998 999
999 /* Determine estimated width */ 1000 /* Determine estimated width */
1000 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; 1001 estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
1001 estimated_width.full = rfixed_div(estimated_width, consumption_time); 1002 estimated_width.full = rfixed_div(estimated_width, consumption_time);
1002 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { 1003 if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
1003 wm->priority_mark.full = rfixed_const(10); 1004 wm->priority_mark.full = wm->priority_mark_max.full;
1004 } else { 1005 } else {
1005 a.full = rfixed_const(16); 1006 a.full = rfixed_const(16);
1006 wm->priority_mark.full = rfixed_div(estimated_width, a); 1007 wm->priority_mark.full = rfixed_div(estimated_width, a);
1008 wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
1007 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; 1009 wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
1008 } 1010 }
1009} 1011}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 5e06ee7076f5..fbb0357f1ec3 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
92void rv770_pcie_gart_disable(struct radeon_device *rdev) 92void rv770_pcie_gart_disable(struct radeon_device *rdev)
93{ 93{
94 u32 tmp; 94 u32 tmp;
95 int i; 95 int i, r;
96 96
97 /* Disable all tables */ 97 /* Disable all tables */
98 for (i = 0; i < 7; i++) 98 for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 113 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 114 WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
115 if (rdev->gart.table.vram.robj) { 115 if (rdev->gart.table.vram.robj) {
116 radeon_object_kunmap(rdev->gart.table.vram.robj); 116 r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
117 radeon_object_unpin(rdev->gart.table.vram.robj); 117 if (likely(r == 0)) {
118 radeon_bo_kunmap(rdev->gart.table.vram.robj);
119 radeon_bo_unpin(rdev->gart.table.vram.robj);
120 radeon_bo_unreserve(rdev->gart.table.vram.robj);
121 }
118 } 122 }
119} 123}
120 124
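The radeon_object_* calls are replaced throughout by the radeon_bo_* API, which requires a buffer to be reserved before it is kunmapped or unpinned and unreserved afterwards. A toy model of that discipline (the fake_bo type and helpers only track the reserved flag; the real reserve can also fail with -EBUSY or -ERESTARTSYS, hence the likely(r == 0) pattern above):

#include <assert.h>
#include <stdbool.h>

struct fake_bo { bool reserved; bool pinned; };

static int bo_reserve(struct fake_bo *bo)
{
	if (bo->reserved)
		return -1;	/* contended; real code may retry or bail */
	bo->reserved = true;
	return 0;
}

static void bo_unreserve(struct fake_bo *bo) { bo->reserved = false; }

static void bo_unpin(struct fake_bo *bo)
{
	assert(bo->reserved);	/* must hold the reservation */
	bo->pinned = false;
}

int main(void)
{
	struct fake_bo gart_table = { .pinned = true };

	if (bo_reserve(&gart_table) == 0) {
		bo_unpin(&gart_table);
		bo_unreserve(&gart_table);
	}
	assert(!gart_table.pinned && !gart_table.reserved);
	return 0;
}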
@@ -870,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
870{ 874{
871 int r; 875 int r;
872 876
877 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
878 r = r600_init_microcode(rdev);
879 if (r) {
880 DRM_ERROR("Failed to load firmware!\n");
881 return r;
882 }
883 }
884
873 rv770_mc_program(rdev); 885 rv770_mc_program(rdev);
874 if (rdev->flags & RADEON_IS_AGP) { 886 if (rdev->flags & RADEON_IS_AGP) {
875 rv770_agp_enable(rdev); 887 rv770_agp_enable(rdev);
@@ -880,13 +892,26 @@ static int rv770_startup(struct radeon_device *rdev)
880 } 892 }
881 rv770_gpu_init(rdev); 893 rv770_gpu_init(rdev);
882 894
883 r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, 895 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
884 &rdev->r600_blit.shader_gpu_addr); 896 if (unlikely(r != 0))
897 return r;
898 r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
899 &rdev->r600_blit.shader_gpu_addr);
900 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
885 if (r) { 901 if (r) {
886 DRM_ERROR("failed to pin blit object %d\n", r); 902 DRM_ERROR("failed to pin blit object %d\n", r);
887 return r; 903 return r;
888 } 904 }
889 905
906 /* Enable IRQ */
907 r = r600_irq_init(rdev);
908 if (r) {
909 DRM_ERROR("radeon: IH init failed (%d).\n", r);
910 radeon_irq_kms_fini(rdev);
911 return r;
912 }
913 r600_irq_set(rdev);
914
890 r = radeon_ring_init(rdev, rdev->cp.ring_size); 915 r = radeon_ring_init(rdev, rdev->cp.ring_size);
891 if (r) 916 if (r)
892 return r; 917 return r;
@@ -934,13 +959,19 @@ int rv770_resume(struct radeon_device *rdev)
934 959
935int rv770_suspend(struct radeon_device *rdev) 960int rv770_suspend(struct radeon_device *rdev)
936{ 961{
962 int r;
963
937 /* FIXME: we should wait for ring to be empty */ 964 /* FIXME: we should wait for ring to be empty */
938 r700_cp_stop(rdev); 965 r700_cp_stop(rdev);
939 rdev->cp.ready = false; 966 rdev->cp.ready = false;
940 r600_wb_disable(rdev); 967 r600_wb_disable(rdev);
941 rv770_pcie_gart_disable(rdev); 968 rv770_pcie_gart_disable(rdev);
942 /* unpin shaders bo */ 969 /* unpin shaders bo */
943 radeon_object_unpin(rdev->r600_blit.shader_obj); 970 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
971 if (likely(r == 0)) {
972 radeon_bo_unpin(rdev->r600_blit.shader_obj);
973 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
974 }
944 return 0; 975 return 0;
945} 976}
946 977
@@ -975,7 +1006,11 @@ int rv770_init(struct radeon_device *rdev)
975 if (r) 1006 if (r)
976 return r; 1007 return r;
977 /* Post card if necessary */ 1008 /* Post card if necessary */
978 if (!r600_card_posted(rdev) && rdev->bios) { 1009 if (!r600_card_posted(rdev)) {
1010 if (!rdev->bios) {
1011 dev_err(rdev->dev, "Card not posted and no BIOS - aborting\n");
1012 return -EINVAL;
1013 }
979 DRM_INFO("GPU not posted. posting now...\n"); 1014 DRM_INFO("GPU not posted. posting now...\n");
980 atom_asic_init(rdev->mode_info.atom_context); 1015 atom_asic_init(rdev->mode_info.atom_context);
981 } 1016 }
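The posting logic is also centralized: instead of each *_init() open-coding the "post via atombios if we have a BIOS" dance, rs600/rs690/rv515 now call radeon_boot_test_post_card() and fail init when it returns false, while rv770 keeps the expanded form above. A userspace model of the assumed contract (return true if the card is posted or could be posted, false otherwise):

#include <assert.h>
#include <stdbool.h>

struct fake_dev { bool posted; bool has_bios; };

static bool boot_test_post_card(struct fake_dev *d)
{
	if (d->posted)
		return true;
	if (!d->has_bios)
		return false;	/* cannot post: caller returns -EINVAL */
	d->posted = true;	/* atom_asic_init() would run here */
	return true;
}

int main(void)
{
	struct fake_dev ok = { .posted = false, .has_bios = true };
	struct fake_dev bad = { .posted = false, .has_bios = false };
	bool r1 = boot_test_post_card(&ok);
	bool r2 = boot_test_post_card(&bad);

	assert(r1 && ok.posted);
	assert(!r2);	/* init would fail instead of limping on */
	return 0;
}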
@@ -998,31 +1033,31 @@ int rv770_init(struct radeon_device *rdev)
998 if (r) 1033 if (r)
999 return r; 1034 return r;
1000 /* Memory manager */ 1035 /* Memory manager */
1001 r = radeon_object_init(rdev); 1036 r = radeon_bo_init(rdev);
1037 if (r)
1038 return r;
1039
1040 r = radeon_irq_kms_init(rdev);
1002 if (r) 1041 if (r)
1003 return r; 1042 return r;
1043
1004 rdev->cp.ring_obj = NULL; 1044 rdev->cp.ring_obj = NULL;
1005 r600_ring_init(rdev, 1024 * 1024); 1045 r600_ring_init(rdev, 1024 * 1024);
1006 1046
1007 if (!rdev->me_fw || !rdev->pfp_fw) { 1047 rdev->ih.ring_obj = NULL;
1008 r = r600_cp_init_microcode(rdev); 1048 r600_ih_ring_init(rdev, 64 * 1024);
1009 if (r) {
1010 DRM_ERROR("Failed to load firmware!\n");
1011 return r;
1012 }
1013 }
1014 1049
1015 r = r600_pcie_gart_init(rdev); 1050 r = r600_pcie_gart_init(rdev);
1016 if (r) 1051 if (r)
1017 return r; 1052 return r;
1018 1053
1019 rdev->accel_working = true;
1020 r = r600_blit_init(rdev); 1054 r = r600_blit_init(rdev);
1021 if (r) { 1055 if (r) {
1022 DRM_ERROR("radeon: failled blitter (%d).\n", r); 1056 DRM_ERROR("radeon: failed blitter (%d).\n", r);
1023 rdev->accel_working = false; 1057 return r;
1024 } 1058 }
1025 1059
1060 rdev->accel_working = true;
1026 r = rv770_startup(rdev); 1061 r = rv770_startup(rdev);
1027 if (r) { 1062 if (r) {
1028 rv770_suspend(rdev); 1063 rv770_suspend(rdev);
@@ -1034,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
1034 if (rdev->accel_working) { 1069 if (rdev->accel_working) {
1035 r = radeon_ib_pool_init(rdev); 1070 r = radeon_ib_pool_init(rdev);
1036 if (r) { 1071 if (r) {
1037 DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); 1072 DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
1038 rdev->accel_working = false; 1073 rdev->accel_working = false;
1039 } 1074 }
1040 r = r600_ib_test(rdev); 1075 r = r600_ib_test(rdev);
1041 if (r) { 1076 if (r) {
1042 DRM_ERROR("radeon: failled testing IB (%d).\n", r); 1077 DRM_ERROR("radeon: failed testing IB (%d).\n", r);
1043 rdev->accel_working = false; 1078 rdev->accel_working = false;
1044 } 1079 }
1045 } 1080 }
@@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev)
1051 rv770_suspend(rdev); 1086 rv770_suspend(rdev);
1052 1087
1053 r600_blit_fini(rdev); 1088 r600_blit_fini(rdev);
1089 r600_irq_fini(rdev);
1090 radeon_irq_kms_fini(rdev);
1054 radeon_ring_fini(rdev); 1091 radeon_ring_fini(rdev);
1055 r600_wb_fini(rdev); 1092 r600_wb_fini(rdev);
1056 rv770_pcie_gart_fini(rdev); 1093 rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
1059 radeon_clocks_fini(rdev); 1096 radeon_clocks_fini(rdev);
1060 if (rdev->flags & RADEON_IS_AGP) 1097 if (rdev->flags & RADEON_IS_AGP)
1061 radeon_agp_fini(rdev); 1098 radeon_agp_fini(rdev);
1062 radeon_object_fini(rdev); 1099 radeon_bo_fini(rdev);
1063 radeon_atombios_fini(rdev); 1100 radeon_atombios_fini(rdev);
1064 kfree(rdev->bios); 1101 kfree(rdev->bios);
1065 rdev->bios = NULL; 1102 rdev->bios = NULL;
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b0a9de7a57c2..1e138f5bae09 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,6 +3,7 @@
3 3
4ccflags-y := -Iinclude/drm 4ccflags-y := -Iinclude/drm
5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ 5ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o 6 ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
7 ttm_object.o ttm_lock.o ttm_execbuf_util.o
7 8
8obj-$(CONFIG_DRM_TTM) += ttm.o 9obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 87c06252d464..1fbb2eea5e88 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,6 +27,14 @@
27/* 27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> 28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */ 29 */
30/* Notes:
31 *
32 * We store a bo pointer in the drm_mm_node struct so we know which
33 * bo owns a specific node. There is no protection on the pointer,
34 * so to keep things sane you must only access it while holding the
35 * global lru lock, and you must reset the pointer to NULL whenever
36 * you free a node.
37 */
30 38
31#include "ttm/ttm_module.h" 39#include "ttm/ttm_module.h"
32#include "ttm/ttm_bo_driver.h" 40#include "ttm/ttm_bo_driver.h"
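A standalone model of the node->private discipline the Notes block describes: the back-pointer is only touched under the lru lock and is reset to NULL before the node is freed (pthread_mutex stands in for glob->lru_lock; fake_node is a stand-in for drm_mm_node):

#include <pthread.h>
#include <stdlib.h>

struct fake_node { void *private; };

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void put_node(struct fake_node *node)
{
	pthread_mutex_lock(&lru_lock);
	node->private = NULL;	/* always reset before freeing */
	pthread_mutex_unlock(&lru_lock);
	free(node);
}

int main(void)
{
	struct fake_node *node = calloc(1, sizeof(*node));
	int bo_placeholder;

	pthread_mutex_lock(&lru_lock);
	node->private = &bo_placeholder;	/* node now "owned" by a bo */
	pthread_mutex_unlock(&lru_lock);

	put_node(node);
	return 0;
}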
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
51 .mode = S_IRUGO 59 .mode = S_IRUGO
52}; 60};
53 61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64 int i;
65
66 for (i = 0; i <= TTM_PL_PRIV5; i++)
67 if (flags & (1 << i)) {
68 *mem_type = i;
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
75 struct ttm_mem_type_manager *man)
76{
77 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
78 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
79 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
80 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
81 printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
82 printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
83 printk(KERN_ERR TTM_PFX " size: %ld\n", (unsigned long)man->size);
84 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
85 man->available_caching);
86 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
87 man->default_caching);
88 spin_lock(&glob->lru_lock);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&glob->lru_lock);
91}
92
93static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
94 struct ttm_placement *placement)
95{
96 struct ttm_bo_device *bdev = bo->bdev;
97 struct ttm_bo_global *glob = bo->glob;
98 struct ttm_mem_type_manager *man;
99 int i, ret, mem_type;
100
101 printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
102 bo, bo->mem.num_pages, bo->mem.size >> 10,
103 bo->mem.size >> 20);
104 for (i = 0; i < placement->num_placement; i++) {
105 ret = ttm_mem_type_from_flags(placement->placement[i],
106 &mem_type);
107 if (ret)
108 return;
109 man = &bdev->man[mem_type];
110 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
111 i, placement->placement[i], mem_type);
112 ttm_mem_type_manager_debug(glob, man);
113 }
114}
115
54static ssize_t ttm_bo_global_show(struct kobject *kobj, 116static ssize_t ttm_bo_global_show(struct kobject *kobj,
55 struct attribute *attr, 117 struct attribute *attr,
56 char *buffer) 118 char *buffer)
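ttm_mem_type_from_flags() relies on TTM placement flags encoding the memory type as a single bit (1 << mem_type), so the first set bit among the low bits is the type index. A standalone version, with index values taken from ttm_placement.h (TTM_PL_SYSTEM = 0 through TTM_PL_PRIV5 = 8):

#include <assert.h>
#include <stdint.h>

#define TTM_PL_VRAM  2
#define TTM_PL_PRIV5 8

static int mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1u << i)) {
			*mem_type = i;
			return 0;
		}
	return -22;	/* -EINVAL */
}

int main(void)
{
	uint32_t t;

	assert(mem_type_from_flags(1u << TTM_PL_VRAM, &t) == 0 &&
	       t == TTM_PL_VRAM);
	assert(mem_type_from_flags(0, &t) != 0);	/* no type bit set */
	return 0;
}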
@@ -117,12 +179,13 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
117 ret = wait_event_interruptible(bo->event_queue, 179 ret = wait_event_interruptible(bo->event_queue,
118 atomic_read(&bo->reserved) == 0); 180 atomic_read(&bo->reserved) == 0);
119 if (unlikely(ret != 0)) 181 if (unlikely(ret != 0))
120 return -ERESTART; 182 return ret;
121 } else { 183 } else {
122 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0); 184 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
123 } 185 }
124 return 0; 186 return 0;
125} 187}
188EXPORT_SYMBOL(ttm_bo_wait_unreserved);
126 189
127static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) 190static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
128{ 191{
@@ -247,7 +310,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
247/* 310/*
248 * Call bo->mutex locked. 311 * Call bo->mutex locked.
249 */ 312 */
250
251static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc) 313static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
252{ 314{
253 struct ttm_bo_device *bdev = bo->bdev; 315 struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +337,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
275 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT, 337 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
276 page_flags | TTM_PAGE_FLAG_USER, 338 page_flags | TTM_PAGE_FLAG_USER,
277 glob->dummy_read_page); 339 glob->dummy_read_page);
278 if (unlikely(bo->ttm == NULL)) 340 if (unlikely(bo->ttm == NULL)) {
279 ret = -ENOMEM; 341 ret = -ENOMEM;
280 break; 342 break;
343 }
281 344
282 ret = ttm_tt_set_user(bo->ttm, current, 345 ret = ttm_tt_set_user(bo->ttm, current,
283 bo->buffer_start, bo->num_pages); 346 bo->buffer_start, bo->num_pages);
@@ -328,14 +391,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
328 } 391 }
329 392
330 if (bo->mem.mem_type == TTM_PL_SYSTEM) { 393 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
331 394 bo->mem = *mem;
332 struct ttm_mem_reg *old_mem = &bo->mem;
333 uint32_t save_flags = old_mem->placement;
334
335 *old_mem = *mem;
336 mem->mm_node = NULL; 395 mem->mm_node = NULL;
337 ttm_flag_masked(&save_flags, mem->placement,
338 TTM_PL_MASK_MEMTYPE);
339 goto moved; 396 goto moved;
340 } 397 }
341 398
@@ -418,6 +475,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
418 kref_put(&bo->list_kref, ttm_bo_ref_bug); 475 kref_put(&bo->list_kref, ttm_bo_ref_bug);
419 } 476 }
420 if (bo->mem.mm_node) { 477 if (bo->mem.mm_node) {
478 bo->mem.mm_node->private = NULL;
421 drm_mm_put_block(bo->mem.mm_node); 479 drm_mm_put_block(bo->mem.mm_node);
422 bo->mem.mm_node = NULL; 480 bo->mem.mm_node = NULL;
423 } 481 }
@@ -554,24 +612,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
554} 612}
555EXPORT_SYMBOL(ttm_bo_unref); 613EXPORT_SYMBOL(ttm_bo_unref);
556 614
557static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type, 615static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
558 bool interruptible, bool no_wait) 616 bool no_wait)
559{ 617{
560 int ret = 0;
561 struct ttm_bo_device *bdev = bo->bdev; 618 struct ttm_bo_device *bdev = bo->bdev;
562 struct ttm_bo_global *glob = bo->glob; 619 struct ttm_bo_global *glob = bo->glob;
563 struct ttm_mem_reg evict_mem; 620 struct ttm_mem_reg evict_mem;
564 uint32_t proposed_placement; 621 struct ttm_placement placement;
565 622 int ret = 0;
566 if (bo->mem.mem_type != mem_type)
567 goto out;
568 623
569 spin_lock(&bo->lock); 624 spin_lock(&bo->lock);
570 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 625 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
571 spin_unlock(&bo->lock); 626 spin_unlock(&bo->lock);
572 627
573 if (unlikely(ret != 0)) { 628 if (unlikely(ret != 0)) {
574 if (ret != -ERESTART) { 629 if (ret != -ERESTARTSYS) {
575 printk(KERN_ERR TTM_PFX 630 printk(KERN_ERR TTM_PFX
576 "Failed to expire sync object before " 631 "Failed to expire sync object before "
577 "buffer eviction.\n"); 632 "buffer eviction.\n");
@@ -584,116 +639,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
584 evict_mem = bo->mem; 639 evict_mem = bo->mem;
585 evict_mem.mm_node = NULL; 640 evict_mem.mm_node = NULL;
586 641
587 proposed_placement = bdev->driver->evict_flags(bo); 642 placement.fpfn = 0;
588 643 placement.lpfn = 0;
589 ret = ttm_bo_mem_space(bo, proposed_placement, 644 placement.num_placement = 0;
590 &evict_mem, interruptible, no_wait); 645 placement.num_busy_placement = 0;
591 if (unlikely(ret != 0 && ret != -ERESTART)) 646 bdev->driver->evict_flags(bo, &placement);
592 ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM, 647 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
593 &evict_mem, interruptible, no_wait); 648 no_wait);
594
595 if (ret) { 649 if (ret) {
596 if (ret != -ERESTART) 650 if (ret != -ERESTARTSYS) {
597 printk(KERN_ERR TTM_PFX 651 printk(KERN_ERR TTM_PFX
598 "Failed to find memory space for " 652 "Failed to find memory space for "
599 "buffer 0x%p eviction.\n", bo); 653 "buffer 0x%p eviction.\n", bo);
654 ttm_bo_mem_space_debug(bo, &placement);
655 }
600 goto out; 656 goto out;
601 } 657 }
602 658
603 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 659 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
604 no_wait); 660 no_wait);
605 if (ret) { 661 if (ret) {
606 if (ret != -ERESTART) 662 if (ret != -ERESTARTSYS)
607 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n"); 663 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
664 spin_lock(&glob->lru_lock);
665 if (evict_mem.mm_node) {
666 evict_mem.mm_node->private = NULL;
667 drm_mm_put_block(evict_mem.mm_node);
668 evict_mem.mm_node = NULL;
669 }
670 spin_unlock(&glob->lru_lock);
608 goto out; 671 goto out;
609 } 672 }
673 bo->evicted = true;
674out:
675 return ret;
676}
677
678static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
679 uint32_t mem_type,
680 bool interruptible, bool no_wait)
681{
682 struct ttm_bo_global *glob = bdev->glob;
683 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
684 struct ttm_buffer_object *bo;
685 int ret, put_count = 0;
610 686
611 spin_lock(&glob->lru_lock); 687 spin_lock(&glob->lru_lock);
612 if (evict_mem.mm_node) { 688 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
613 drm_mm_put_block(evict_mem.mm_node); 689 kref_get(&bo->list_kref);
614 evict_mem.mm_node = NULL; 690 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
615 } 691 if (likely(ret == 0))
692 put_count = ttm_bo_del_from_lru(bo);
616 spin_unlock(&glob->lru_lock); 693 spin_unlock(&glob->lru_lock);
617 bo->evicted = true; 694 if (unlikely(ret != 0))
618out: 695 return ret;
696 while (put_count--)
697 kref_put(&bo->list_kref, ttm_bo_ref_bug);
698 ret = ttm_bo_evict(bo, interruptible, no_wait);
699 ttm_bo_unreserve(bo);
700 kref_put(&bo->list_kref, ttm_bo_release_list);
619 return ret; 701 return ret;
620} 702}
621 703
704static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
705 struct ttm_mem_type_manager *man,
706 struct ttm_placement *placement,
707 struct ttm_mem_reg *mem,
708 struct drm_mm_node **node)
709{
710 struct ttm_bo_global *glob = bo->glob;
711 unsigned long lpfn;
712 int ret;
713
714 lpfn = placement->lpfn;
715 if (!lpfn)
716 lpfn = man->size;
717 *node = NULL;
718 do {
719 ret = drm_mm_pre_get(&man->manager);
720 if (unlikely(ret))
721 return ret;
722
723 spin_lock(&glob->lru_lock);
724 *node = drm_mm_search_free_in_range(&man->manager,
725 mem->num_pages, mem->page_alignment,
726 placement->fpfn, lpfn, 1);
727 if (unlikely(*node == NULL)) {
728 spin_unlock(&glob->lru_lock);
729 return 0;
730 }
731 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
732 mem->page_alignment,
733 placement->fpfn,
734 lpfn);
735 spin_unlock(&glob->lru_lock);
736 } while (*node == NULL);
737 return 0;
738}
739
622/** 740/**
623 * Repeatedly evict memory from the LRU for @mem_type until we create enough 741 * Repeatedly evict memory from the LRU for @mem_type until we create enough
624 * space, or we've evicted everything and there isn't enough space. 742 * space, or we've evicted everything and there isn't enough space.
625 */ 743 */
626static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev, 744static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
627 struct ttm_mem_reg *mem, 745 uint32_t mem_type,
628 uint32_t mem_type, 746 struct ttm_placement *placement,
629 bool interruptible, bool no_wait) 747 struct ttm_mem_reg *mem,
748 bool interruptible, bool no_wait)
630{ 749{
750 struct ttm_bo_device *bdev = bo->bdev;
631 struct ttm_bo_global *glob = bdev->glob; 751 struct ttm_bo_global *glob = bdev->glob;
632 struct drm_mm_node *node;
633 struct ttm_buffer_object *entry;
634 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 752 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
635 struct list_head *lru; 753 struct drm_mm_node *node;
636 unsigned long num_pages = mem->num_pages;
637 int put_count = 0;
638 int ret; 754 int ret;
639 755
640retry_pre_get:
641 ret = drm_mm_pre_get(&man->manager);
642 if (unlikely(ret != 0))
643 return ret;
644
645 spin_lock(&glob->lru_lock);
646 do { 756 do {
647 node = drm_mm_search_free(&man->manager, num_pages, 757 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
648 mem->page_alignment, 1); 758 if (unlikely(ret != 0))
759 return ret;
649 if (node) 760 if (node)
650 break; 761 break;
651 762 spin_lock(&glob->lru_lock);
652 lru = &man->lru; 763 if (list_empty(&man->lru)) {
653 if (list_empty(lru)) 764 spin_unlock(&glob->lru_lock);
654 break; 765 break;
655 766 }
656 entry = list_first_entry(lru, struct ttm_buffer_object, lru);
657 kref_get(&entry->list_kref);
658
659 ret =
660 ttm_bo_reserve_locked(entry, interruptible, no_wait,
661 false, 0);
662
663 if (likely(ret == 0))
664 put_count = ttm_bo_del_from_lru(entry);
665
666 spin_unlock(&glob->lru_lock); 767 spin_unlock(&glob->lru_lock);
667 768 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
769 no_wait);
668 if (unlikely(ret != 0)) 770 if (unlikely(ret != 0))
669 return ret; 771 return ret;
670
671 while (put_count--)
672 kref_put(&entry->list_kref, ttm_bo_ref_bug);
673
674 ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
675
676 ttm_bo_unreserve(entry);
677
678 kref_put(&entry->list_kref, ttm_bo_release_list);
679 if (ret)
680 return ret;
681
682 spin_lock(&glob->lru_lock);
683 } while (1); 772 } while (1);
684 773 if (node == NULL)
685 if (!node) {
686 spin_unlock(&glob->lru_lock);
687 return -ENOMEM; 774 return -ENOMEM;
688 }
689
690 node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
691 if (unlikely(!node)) {
692 spin_unlock(&glob->lru_lock);
693 goto retry_pre_get;
694 }
695
696 spin_unlock(&glob->lru_lock);
697 mem->mm_node = node; 775 mem->mm_node = node;
698 mem->mem_type = mem_type; 776 mem->mem_type = mem_type;
699 return 0; 777 return 0;
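ttm_bo_man_get_node() keeps drm_mm's pre-get/search/atomic-get shape: drm_mm_pre_get() preallocates node memory while no spinlock is held, so the search and get under the lru lock never sleep, and a lost race on the atomic get simply retries from the top. A compact userspace model of that loop (all helpers here are stand-ins, not the drm_mm API):

#include <assert.h>
#include <stddef.h>

static int prealloc_calls;

static int pre_get(void) { prealloc_calls++; return 0; }
static void *search_free(void) { return (void *)1; }	/* always "found" */

static void *get_block_atomic(void *n, int fail_once)
{
	static int raced;

	if (fail_once && !raced++)
		return NULL;	/* lost the race: retry from pre_get() */
	return n;
}

int main(void)
{
	void *node = NULL;

	do {
		if (pre_get())
			break;
		/* spin_lock(&glob->lru_lock) would be taken here */
		node = search_free();
		if (node)
			node = get_block_atomic(node, 1);
		/* ... and released here */
	} while (node == NULL);

	assert(node != NULL && prealloc_calls == 2);	/* exactly one retry */
	return 0;
}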
@@ -724,7 +802,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
724 return result; 802 return result;
725} 803}
726 804
727
728static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man, 805static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
729 bool disallow_fixed, 806 bool disallow_fixed,
730 uint32_t mem_type, 807 uint32_t mem_type,
@@ -757,66 +834,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
757 * space. 834 * space.
758 */ 835 */
759int ttm_bo_mem_space(struct ttm_buffer_object *bo, 836int ttm_bo_mem_space(struct ttm_buffer_object *bo,
760 uint32_t proposed_placement, 837 struct ttm_placement *placement,
761 struct ttm_mem_reg *mem, 838 struct ttm_mem_reg *mem,
762 bool interruptible, bool no_wait) 839 bool interruptible, bool no_wait)
763{ 840{
764 struct ttm_bo_device *bdev = bo->bdev; 841 struct ttm_bo_device *bdev = bo->bdev;
765 struct ttm_bo_global *glob = bo->glob;
766 struct ttm_mem_type_manager *man; 842 struct ttm_mem_type_manager *man;
767
768 uint32_t num_prios = bdev->driver->num_mem_type_prio;
769 const uint32_t *prios = bdev->driver->mem_type_prio;
770 uint32_t i;
771 uint32_t mem_type = TTM_PL_SYSTEM; 843 uint32_t mem_type = TTM_PL_SYSTEM;
772 uint32_t cur_flags = 0; 844 uint32_t cur_flags = 0;
773 bool type_found = false; 845 bool type_found = false;
774 bool type_ok = false; 846 bool type_ok = false;
775 bool has_eagain = false; 847 bool has_erestartsys = false;
776 struct drm_mm_node *node = NULL; 848 struct drm_mm_node *node = NULL;
777 int ret; 849 int i, ret;
778 850
779 mem->mm_node = NULL; 851 mem->mm_node = NULL;
780 for (i = 0; i < num_prios; ++i) { 852 for (i = 0; i < placement->num_placement; ++i) {
781 mem_type = prios[i]; 853 ret = ttm_mem_type_from_flags(placement->placement[i],
854 &mem_type);
855 if (ret)
856 return ret;
782 man = &bdev->man[mem_type]; 857 man = &bdev->man[mem_type];
783 858
784 type_ok = ttm_bo_mt_compatible(man, 859 type_ok = ttm_bo_mt_compatible(man,
785 bo->type == ttm_bo_type_user, 860 bo->type == ttm_bo_type_user,
786 mem_type, proposed_placement, 861 mem_type,
787 &cur_flags); 862 placement->placement[i],
863 &cur_flags);
788 864
789 if (!type_ok) 865 if (!type_ok)
790 continue; 866 continue;
791 867
792 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 868 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
793 cur_flags); 869 cur_flags);
870 /*
871 * Copy the access and other non-mapping-related flag bits from
872 * the memory placement flags into the current flags.
873 */
874 ttm_flag_masked(&cur_flags, placement->placement[i],
875 ~TTM_PL_MASK_MEMTYPE);
794 876
795 if (mem_type == TTM_PL_SYSTEM) 877 if (mem_type == TTM_PL_SYSTEM)
796 break; 878 break;
797 879
798 if (man->has_type && man->use_type) { 880 if (man->has_type && man->use_type) {
799 type_found = true; 881 type_found = true;
800 do { 882 ret = ttm_bo_man_get_node(bo, man, placement, mem,
801 ret = drm_mm_pre_get(&man->manager); 883 &node);
802 if (unlikely(ret)) 884 if (unlikely(ret))
803 return ret; 885 return ret;
804
805 spin_lock(&glob->lru_lock);
806 node = drm_mm_search_free(&man->manager,
807 mem->num_pages,
808 mem->page_alignment,
809 1);
810 if (unlikely(!node)) {
811 spin_unlock(&glob->lru_lock);
812 break;
813 }
814 node = drm_mm_get_block_atomic(node,
815 mem->num_pages,
816 mem->
817 page_alignment);
818 spin_unlock(&glob->lru_lock);
819 } while (!node);
820 } 886 }
821 if (node) 887 if (node)
822 break; 888 break;
@@ -826,67 +892,66 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
826 mem->mm_node = node; 892 mem->mm_node = node;
827 mem->mem_type = mem_type; 893 mem->mem_type = mem_type;
828 mem->placement = cur_flags; 894 mem->placement = cur_flags;
895 if (node)
896 node->private = bo;
829 return 0; 897 return 0;
830 } 898 }
831 899
832 if (!type_found) 900 if (!type_found)
833 return -EINVAL; 901 return -EINVAL;
834 902
835 num_prios = bdev->driver->num_mem_busy_prio; 903 for (i = 0; i < placement->num_busy_placement; ++i) {
836 prios = bdev->driver->mem_busy_prio; 904 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
837 905 &mem_type);
838 for (i = 0; i < num_prios; ++i) { 906 if (ret)
839 mem_type = prios[i]; 907 return ret;
840 man = &bdev->man[mem_type]; 908 man = &bdev->man[mem_type];
841
842 if (!man->has_type) 909 if (!man->has_type)
843 continue; 910 continue;
844
845 if (!ttm_bo_mt_compatible(man, 911 if (!ttm_bo_mt_compatible(man,
846 bo->type == ttm_bo_type_user, 912 bo->type == ttm_bo_type_user,
847 mem_type, 913 mem_type,
848 proposed_placement, &cur_flags)) 914 placement->busy_placement[i],
915 &cur_flags))
849 continue; 916 continue;
850 917
851 cur_flags = ttm_bo_select_caching(man, bo->mem.placement, 918 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
852 cur_flags); 919 cur_flags);
920 /*
921 * Copy the access and other non-mapping-related flag bits from
922 * the memory placement flags into the current flags.
923 */
924 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
925 ~TTM_PL_MASK_MEMTYPE);
853 926
854 ret = ttm_bo_mem_force_space(bdev, mem, mem_type, 927 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
855 interruptible, no_wait); 928 interruptible, no_wait);
856
857 if (ret == 0 && mem->mm_node) { 929 if (ret == 0 && mem->mm_node) {
858 mem->placement = cur_flags; 930 mem->placement = cur_flags;
931 mem->mm_node->private = bo;
859 return 0; 932 return 0;
860 } 933 }
861 934 if (ret == -ERESTARTSYS)
862 if (ret == -ERESTART) 935 has_erestartsys = true;
863 has_eagain = true;
864 } 936 }
865 937 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
866 ret = (has_eagain) ? -ERESTART : -ENOMEM;
867 return ret; 938 return ret;
868} 939}
869EXPORT_SYMBOL(ttm_bo_mem_space); 940EXPORT_SYMBOL(ttm_bo_mem_space);
870 941
871int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait) 942int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
872{ 943{
873 int ret = 0;
874
875 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait) 944 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
876 return -EBUSY; 945 return -EBUSY;
877 946
878 ret = wait_event_interruptible(bo->event_queue, 947 return wait_event_interruptible(bo->event_queue,
879 atomic_read(&bo->cpu_writers) == 0); 948 atomic_read(&bo->cpu_writers) == 0);
880
881 if (ret == -ERESTARTSYS)
882 ret = -ERESTART;
883
884 return ret;
885} 949}
950EXPORT_SYMBOL(ttm_bo_wait_cpu);
886 951
887int ttm_bo_move_buffer(struct ttm_buffer_object *bo, 952int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
888 uint32_t proposed_placement, 953 struct ttm_placement *placement,
889 bool interruptible, bool no_wait) 954 bool interruptible, bool no_wait)
890{ 955{
891 struct ttm_bo_global *glob = bo->glob; 956 struct ttm_bo_global *glob = bo->glob;
892 int ret = 0; 957 int ret = 0;
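ttm_bo_mem_space() is now a two-pass search over the caller-supplied ttm_placement: the first pass tries each preferred placement without evicting anything, and only if that fails does the busy-placement pass force eviction via ttm_bo_mem_force_space(). A simplified standalone model of the control flow (try_place() fakes a full VRAM pool; none of this is the TTM API):

#include <assert.h>
#include <stdbool.h>

static bool try_place(int mem_type, bool may_evict, int *out)
{
	/* pretend VRAM (2) is full unless we may evict; GTT (1) is full */
	if (mem_type == 2 && !may_evict)
		return false;
	if (mem_type == 1)
		return false;
	*out = mem_type;
	return true;
}

int main(void)
{
	const int placement[]      = { 2, 1 };	/* prefer VRAM, then GTT */
	const int busy_placement[] = { 2 };	/* under pressure: evict in VRAM */
	int chosen = -1, i;

	for (i = 0; i < 2; i++)			/* note: <, not <= */
		if (try_place(placement[i], false, &chosen))
			break;
	if (chosen < 0)
		for (i = 0; i < 1; i++)
			if (try_place(busy_placement[i], true, &chosen))
				break;

	assert(chosen == 2);	/* VRAM space found only after eviction */
	return 0;
}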
@@ -899,147 +964,132 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
899 * Have the driver move function wait for idle when necessary, 964 * Have the driver move function wait for idle when necessary,
900 * instead of doing it here. 965 * instead of doing it here.
901 */ 966 */
902
903 spin_lock(&bo->lock); 967 spin_lock(&bo->lock);
904 ret = ttm_bo_wait(bo, false, interruptible, no_wait); 968 ret = ttm_bo_wait(bo, false, interruptible, no_wait);
905 spin_unlock(&bo->lock); 969 spin_unlock(&bo->lock);
906
907 if (ret) 970 if (ret)
908 return ret; 971 return ret;
909
910 mem.num_pages = bo->num_pages; 972 mem.num_pages = bo->num_pages;
911 mem.size = mem.num_pages << PAGE_SHIFT; 973 mem.size = mem.num_pages << PAGE_SHIFT;
912 mem.page_alignment = bo->mem.page_alignment; 974 mem.page_alignment = bo->mem.page_alignment;
913
914 /* 975 /*
915 * Determine where to move the buffer. 976 * Determine where to move the buffer.
916 */ 977 */
917 978 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
918 ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
919 interruptible, no_wait);
920 if (ret) 979 if (ret)
921 goto out_unlock; 980 goto out_unlock;
922
923 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait); 981 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
924
925out_unlock: 982out_unlock:
926 if (ret && mem.mm_node) { 983 if (ret && mem.mm_node) {
927 spin_lock(&glob->lru_lock); 984 spin_lock(&glob->lru_lock);
985 mem.mm_node->private = NULL;
928 drm_mm_put_block(mem.mm_node); 986 drm_mm_put_block(mem.mm_node);
929 spin_unlock(&glob->lru_lock); 987 spin_unlock(&glob->lru_lock);
930 } 988 }
931 return ret; 989 return ret;
932} 990}
933 991
934static int ttm_bo_mem_compat(uint32_t proposed_placement, 992static int ttm_bo_mem_compat(struct ttm_placement *placement,
935 struct ttm_mem_reg *mem) 993 struct ttm_mem_reg *mem)
936{ 994{
937 if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0) 995 int i;
938 return 0; 996
939 if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0) 997 for (i = 0; i < placement->num_placement; i++) {
940 return 0; 998 if ((placement->placement[i] & mem->placement &
941 999 TTM_PL_MASK_CACHING) &&
942 return 1; 1000 (placement->placement[i] & mem->placement &
1001 TTM_PL_MASK_MEM))
1002 return i;
1003 }
1004 return -1;
943} 1005}
944 1006
945int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 1007int ttm_bo_validate(struct ttm_buffer_object *bo,
946 uint32_t proposed_placement, 1008 struct ttm_placement *placement,
947 bool interruptible, bool no_wait) 1009 bool interruptible, bool no_wait)
948{ 1010{
949 int ret; 1011 int ret;
950 1012
951 BUG_ON(!atomic_read(&bo->reserved)); 1013 BUG_ON(!atomic_read(&bo->reserved));
952 bo->proposed_placement = proposed_placement; 1014 /* Check that range is valid */
953 1015 if (placement->lpfn || placement->fpfn)
954 TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n", 1016 if (placement->fpfn > placement->lpfn ||
955 (unsigned long)proposed_placement, 1017 (placement->lpfn - placement->fpfn) < bo->num_pages)
956 (unsigned long)bo->mem.placement); 1018 return -EINVAL;
957
958 /* 1019 /*
959 * Check whether we need to move buffer. 1020 * Check whether we need to move buffer.
960 */ 1021 */
961 1022 ret = ttm_bo_mem_compat(placement, &bo->mem);
962 if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) { 1023 if (ret < 0) {
963 ret = ttm_bo_move_buffer(bo, bo->proposed_placement, 1024 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
964 interruptible, no_wait); 1025 if (ret)
965 if (ret) {
966 if (ret != -ERESTART)
967 printk(KERN_ERR TTM_PFX
968 "Failed moving buffer. "
969 "Proposed placement 0x%08x\n",
970 bo->proposed_placement);
971 if (ret == -ENOMEM)
972 printk(KERN_ERR TTM_PFX
973 "Out of aperture space or "
974 "DRM memory quota.\n");
975 return ret; 1026 return ret;
976 } 1027 } else {
1028 /*
1029 * Copy the access and other non-mapping-related flag bits from
1030 * the compatible memory placement flags into the active flags.
1031 */
1032 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1033 ~TTM_PL_MASK_MEMTYPE);
977 } 1034 }
978
979 /* 1035 /*
980 * We might need to add a TTM. 1036 * We might need to add a TTM.
981 */ 1037 */
982
983 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 1038 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
984 ret = ttm_bo_add_ttm(bo, true); 1039 ret = ttm_bo_add_ttm(bo, true);
985 if (ret) 1040 if (ret)
986 return ret; 1041 return ret;
987 } 1042 }
988 /*
989 * Validation has succeeded, move the access and other
990 * non-mapping-related flag bits from the proposed flags to
991 * the active flags
992 */
993
994 ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
995 ~TTM_PL_MASK_MEMTYPE);
996
997 return 0; 1043 return 0;
998} 1044}
999EXPORT_SYMBOL(ttm_buffer_object_validate); 1045EXPORT_SYMBOL(ttm_bo_validate);
 
-int
-ttm_bo_check_placement(struct ttm_buffer_object *bo,
-		       uint32_t set_flags, uint32_t clr_flags)
+int ttm_bo_check_placement(struct ttm_buffer_object *bo,
+			struct ttm_placement *placement)
 {
-	uint32_t new_mask = set_flags | clr_flags;
-
-	if ((bo->type == ttm_bo_type_user) &&
-	    (clr_flags & TTM_PL_FLAG_CACHED)) {
-		printk(KERN_ERR TTM_PFX
-		       "User buffers require cache-coherent memory.\n");
-		return -EINVAL;
-	}
-
-	if (!capable(CAP_SYS_ADMIN)) {
-		if (new_mask & TTM_PL_FLAG_NO_EVICT) {
-			printk(KERN_ERR TTM_PFX "Need to be root to modify"
-			       " NO_EVICT status.\n");
-			return -EINVAL;
-		}
-
-		if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
-		    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-			printk(KERN_ERR TTM_PFX
-			       "Incompatible memory specification"
-			       " for NO_EVICT buffer.\n");
-			return -EINVAL;
+	int i;
+
+	if (placement->fpfn || placement->lpfn) {
+		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
+			printk(KERN_ERR TTM_PFX "Page number range too small. "
+				"Need %lu pages, range is [%u, %u]\n",
+				bo->mem.num_pages, placement->fpfn,
+				placement->lpfn);
+			return -EINVAL;
+		}
+	}
+	for (i = 0; i < placement->num_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+					"modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
+		}
+	}
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		if (!capable(CAP_SYS_ADMIN)) {
+			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
+				printk(KERN_ERR TTM_PFX "Need to be root to "
+					"modify NO_EVICT status.\n");
+				return -EINVAL;
+			}
 		}
 	}
 	return 0;
 }
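The fpfn/lpfn pair is what the range checks above and in ttm_bo_validate() enforce: the buffer must fit between those page offsets inside the memory type. A sketch of a placement a driver might use for a scanout buffer pinned to the start of VRAM (the 16 MiB figure is illustrative):

	/* Illustrative: confine the BO to pages [0, 4096) of VRAM,
	 * i.e. the first 16 MiB with 4 KiB pages, mapped uncached. */
	static uint32_t scanout_flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_UNCACHED;

	static struct ttm_placement scanout_placement = {
		.fpfn = 0,
		.lpfn = 4096,
		.num_placement = 1,
		.placement = &scanout_flags,
		.num_busy_placement = 1,
		.busy_placement = &scanout_flags,
	};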
 
-int ttm_buffer_object_init(struct ttm_bo_device *bdev,
-			   struct ttm_buffer_object *bo,
-			   unsigned long size,
-			   enum ttm_bo_type type,
-			   uint32_t flags,
-			   uint32_t page_alignment,
-			   unsigned long buffer_start,
-			   bool interruptible,
-			   struct file *persistant_swap_storage,
-			   size_t acc_size,
-			   void (*destroy) (struct ttm_buffer_object *))
+int ttm_bo_init(struct ttm_bo_device *bdev,
+		struct ttm_buffer_object *bo,
+		unsigned long size,
+		enum ttm_bo_type type,
+		struct ttm_placement *placement,
+		uint32_t page_alignment,
+		unsigned long buffer_start,
+		bool interruptible,
+		struct file *persistant_swap_storage,
+		size_t acc_size,
+		void (*destroy) (struct ttm_buffer_object *))
 {
 	int ret = 0;
 	unsigned long num_pages;
@@ -1077,29 +1127,21 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 	bo->acc_size = acc_size;
 	atomic_inc(&bo->glob->bo_count);
 
-	ret = ttm_bo_check_placement(bo, flags, 0ULL);
+	ret = ttm_bo_check_placement(bo, placement);
 	if (unlikely(ret != 0))
 		goto out_err;
 
 	/*
-	 * If no caching attributes are set, accept any form of caching.
-	 */
-
-	if ((flags & TTM_PL_MASK_CACHING) == 0)
-		flags |= TTM_PL_MASK_CACHING;
-
-	/*
 	 * For ttm_bo_type_device buffers, allocate
 	 * address space from the device.
 	 */
-
 	if (bo->type == ttm_bo_type_device) {
 		ret = ttm_bo_setup_vm(bo);
 		if (ret)
 			goto out_err;
 	}
 
-	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+	ret = ttm_bo_validate(bo, placement, interruptible, false);
 	if (ret)
 		goto out_err;
 
@@ -1112,7 +1154,7 @@ out_err:
 
 	return ret;
 }
-EXPORT_SYMBOL(ttm_buffer_object_init);
+EXPORT_SYMBOL(ttm_bo_init);
 
 static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 				 unsigned long num_pages)
@@ -1123,19 +1165,19 @@ static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
 	return glob->ttm_bo_size + 2 * page_array_size;
 }
 
-int ttm_buffer_object_create(struct ttm_bo_device *bdev,
-			     unsigned long size,
-			     enum ttm_bo_type type,
-			     uint32_t flags,
-			     uint32_t page_alignment,
-			     unsigned long buffer_start,
-			     bool interruptible,
-			     struct file *persistant_swap_storage,
-			     struct ttm_buffer_object **p_bo)
+int ttm_bo_create(struct ttm_bo_device *bdev,
+			unsigned long size,
+			enum ttm_bo_type type,
+			struct ttm_placement *placement,
+			uint32_t page_alignment,
+			unsigned long buffer_start,
+			bool interruptible,
+			struct file *persistant_swap_storage,
+			struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo;
-	int ret;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	int ret;
 
 	size_t acc_size =
 		ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1150,76 +1192,41 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 		return -ENOMEM;
 	}
 
-	ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
-				     page_alignment, buffer_start,
-				     interruptible,
-				     persistant_swap_storage, acc_size, NULL);
+	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
+				buffer_start, interruptible,
+				persistant_swap_storage, acc_size, NULL);
 	if (likely(ret == 0))
 		*p_bo = bo;
 
 	return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-			     uint32_t mem_type, bool allow_errors)
-{
-	int ret;
-
-	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
-
-	if (ret && allow_errors)
-		goto out;
-
-	if (bo->mem.mem_type == mem_type)
-		ret = ttm_bo_evict(bo, mem_type, false, false);
-
-	if (ret) {
-		if (allow_errors) {
-			goto out;
-		} else {
-			ret = 0;
-			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-		}
-	}
-
-out:
-	return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   struct list_head *head,
-				   unsigned mem_type, bool allow_errors)
+				unsigned mem_type, bool allow_errors)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry;
 	int ret;
-	int put_count;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-
-	while (!list_empty(head)) {
-		entry = list_first_entry(head, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-		put_count = ttm_bo_del_from_lru(entry);
+	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-		BUG_ON(ret);
-		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-		ttm_bo_unreserve(entry);
-		kref_put(&entry->list_kref, ttm_bo_release_list);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				printk(KERN_ERR TTM_PFX
+					"Cleanup eviction failed\n");
+			}
+		}
 		spin_lock(&glob->lru_lock);
 	}
-
 	spin_unlock(&glob->lru_lock);
-
 	return 0;
 }
 
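A sketch of object creation through the renamed entry point; the size, alignment and helper name are illustrative, the signature is the one introduced above:

	/* Illustrative: allocate a 64 KiB kernel-owned BO, validated into
	 * the caller's placement before it is returned. */
	static int example_create(struct ttm_bo_device *bdev,
				  struct ttm_placement *placement,
				  struct ttm_buffer_object **p_bo)
	{
		return ttm_bo_create(bdev, 64 * 1024, ttm_bo_type_kernel,
				     placement, 0 /* page_alignment */,
				     0 /* buffer_start */,
				     true /* interruptible */,
				     NULL /* persistant_swap_storage */, p_bo);
	}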
@@ -1246,7 +1253,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+		ttm_bo_force_list_clean(bdev, mem_type, false);
 
 		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
@@ -1279,12 +1286,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_offset, unsigned long p_size)
+			unsigned long p_size)
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
@@ -1314,7 +1321,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 		       type);
 		return ret;
 	}
-	ret = drm_mm_init(&man->manager, p_offset, p_size);
+	ret = drm_mm_init(&man->manager, 0, p_size);
 	if (ret)
 		return ret;
 	}
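Managed ranges now always start at offset zero, so drivers pass only a size. A sketch of the manager lifecycle built from the three entry points touched above (the 256 MiB figure and the helper name are illustrative):

	/* Illustrative: bring up a VRAM manager, drain it on suspend,
	 * tear it down on unload. */
	static int example_vram_manager(struct ttm_bo_device *bdev)
	{
		int ret;

		ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
				     (256u << 20) >> PAGE_SHIFT);
		if (ret)
			return ret;

		ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);  /* suspend path */
		if (ret)
			return ret;

		return ttm_bo_clean_mm(bdev, TTM_PL_VRAM); /* unload path */
	}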
@@ -1463,7 +1470,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
@@ -1693,7 +1700,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
 		ret = wait_event_interruptible
 			(bo->event_queue, atomic_read(&bo->reserved) == 0);
 		if (unlikely(ret != 0))
-			return -ERESTART;
+			return ret;
 	} else {
 		wait_event(bo->event_queue,
 			   atomic_read(&bo->reserved) == 0);
@@ -1722,12 +1729,14 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	ttm_bo_unreserve(bo);
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
 
 void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
 {
 	if (atomic_dec_and_test(&bo->cpu_writers))
 		wake_up_all(&bo->event_queue);
 }
+EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
 
 /**
  * A buffer object shrink method that tries to swap out the first
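With both synccpu entry points exported, drivers outside ttm_bo.c can bracket CPU access; a sketch (the mapping itself is elided and assumed to exist):

	/* Illustrative: block GPU-side moves of bo while the CPU writes
	 * through an existing kernel mapping. */
	static int example_cpu_write(struct ttm_buffer_object *bo)
	{
		int ret;

		ret = ttm_bo_synccpu_write_grab(bo, false); /* may wait */
		if (ret)
			return ret;

		/* ... CPU stores through a kmap/vmap of the BO ... */

		ttm_bo_synccpu_write_release(bo);
		return 0;
	}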
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 61c5572d2b91..2ecf7d0c64f6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 #endif
 	return tmp;
 }
+EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 			  unsigned long bus_base,
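Exporting ttm_io_prot() lets drivers derive page protections for their own mappings from a BO's caching flags; a minimal sketch (helper name illustrative):

	/* Illustrative: pgprot for mapping bo with the caching it was
	 * validated with. */
	static pgprot_t example_prot(struct ttm_buffer_object *bo, pgprot_t base)
	{
		return ttm_io_prot(bo->mem.placement, base);
	}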
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 1c040d040338..609a85a4d855 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	ret = ttm_bo_wait(bo, false, true, false);
 	spin_unlock(&bo->lock);
 	if (unlikely(ret != 0)) {
-		retval = (ret != -ERESTART) ?
+		retval = (ret != -ERESTARTSYS) ?
 			VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 		goto out_unlock;
 	}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		ret = -EINTR;
-		goto out_unref;
 	case -EBUSY:
 		ret = -EAGAIN;
 		goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		return -EINTR;
 	case -EBUSY:
 		return -EAGAIN;
 	default:
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
new file mode 100644
index 000000000000..c285c2902d15
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -0,0 +1,117 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "ttm/ttm_execbuf_util.h"
29#include "ttm/ttm_bo_driver.h"
30#include "ttm/ttm_placement.h"
31#include <linux/wait.h>
32#include <linux/sched.h>
33#include <linux/module.h>
34
35void ttm_eu_backoff_reservation(struct list_head *list)
36{
37 struct ttm_validate_buffer *entry;
38
39 list_for_each_entry(entry, list, head) {
40 struct ttm_buffer_object *bo = entry->bo;
41 if (!entry->reserved)
42 continue;
43
44 entry->reserved = false;
45 ttm_bo_unreserve(bo);
46 }
47}
48EXPORT_SYMBOL(ttm_eu_backoff_reservation);
49
50/*
51 * Reserve buffers for validation.
52 *
53 * If a buffer in the list is marked for CPU access, we back off and
54 * wait for that buffer to become free for GPU access.
55 *
56 * If a buffer is reserved for another validation, the validator with
57 * the highest validation sequence backs off and waits for that buffer
58 * to become unreserved. This prevents deadlocks when validating multiple
59 * buffers in different orders.
60 */
61
62int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
63{
64 struct ttm_validate_buffer *entry;
65 int ret;
66
67retry:
68 list_for_each_entry(entry, list, head) {
69 struct ttm_buffer_object *bo = entry->bo;
70
71 entry->reserved = false;
72 ret = ttm_bo_reserve(bo, true, false, true, val_seq);
73 if (ret != 0) {
74 ttm_eu_backoff_reservation(list);
75 if (ret == -EAGAIN) {
76 ret = ttm_bo_wait_unreserved(bo, true);
77 if (unlikely(ret != 0))
78 return ret;
79 goto retry;
80 } else
81 return ret;
82 }
83
84 entry->reserved = true;
85 if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
86 ttm_eu_backoff_reservation(list);
87 ret = ttm_bo_wait_cpu(bo, false);
88 if (ret)
89 return ret;
90 goto retry;
91 }
92 }
93 return 0;
94}
95EXPORT_SYMBOL(ttm_eu_reserve_buffers);
96
97void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
98{
99 struct ttm_validate_buffer *entry;
100
101 list_for_each_entry(entry, list, head) {
102 struct ttm_buffer_object *bo = entry->bo;
103 struct ttm_bo_driver *driver = bo->bdev->driver;
104 void *old_sync_obj;
105
106 spin_lock(&bo->lock);
107 old_sync_obj = bo->sync_obj;
108 bo->sync_obj = driver->sync_obj_ref(sync_obj);
109 bo->sync_obj_arg = entry->new_sync_obj_arg;
110 spin_unlock(&bo->lock);
111 ttm_bo_unreserve(bo);
112 entry->reserved = false;
113 if (old_sync_obj)
114 driver->sync_obj_unref(&old_sync_obj);
115 }
116}
117EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
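A sketch of the call sequence these helpers are built for, in a driver's execbuf path; the validation step and the fence object are driver-specific and only indicated by comments:

	/* Illustrative execbuf flow over a list of ttm_validate_buffer. */
	static int example_execbuf(struct list_head *val_list, uint32_t val_seq,
				   void *fence_sync_obj)
	{
		int ret;

		ret = ttm_eu_reserve_buffers(val_list, val_seq);
		if (ret)
			return ret;

		/* ... validate each entry and build the command stream;
		 * on failure: ttm_eu_backoff_reservation(val_list); ... */

		/* Success: fence everything and drop the reservations. */
		ttm_eu_fence_buffer_objects(val_list, fence_sync_obj);
		return 0;
	}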
diff --git a/drivers/gpu/drm/ttm/ttm_lock.c b/drivers/gpu/drm/ttm/ttm_lock.c
new file mode 100644
index 000000000000..f619ebcaa4ec
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_lock.c
@@ -0,0 +1,311 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#include "ttm/ttm_lock.h"
32#include "ttm/ttm_module.h"
33#include <asm/atomic.h>
34#include <linux/errno.h>
35#include <linux/wait.h>
36#include <linux/sched.h>
37#include <linux/module.h>
38
39#define TTM_WRITE_LOCK_PENDING (1 << 0)
40#define TTM_VT_LOCK_PENDING (1 << 1)
41#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
42#define TTM_VT_LOCK (1 << 3)
43#define TTM_SUSPEND_LOCK (1 << 4)
44
45void ttm_lock_init(struct ttm_lock *lock)
46{
47 spin_lock_init(&lock->lock);
48 init_waitqueue_head(&lock->queue);
49 lock->rw = 0;
50 lock->flags = 0;
51 lock->kill_takers = false;
52 lock->signal = SIGKILL;
53}
54EXPORT_SYMBOL(ttm_lock_init);
55
56void ttm_read_unlock(struct ttm_lock *lock)
57{
58 spin_lock(&lock->lock);
59 if (--lock->rw == 0)
60 wake_up_all(&lock->queue);
61 spin_unlock(&lock->lock);
62}
63EXPORT_SYMBOL(ttm_read_unlock);
64
65static bool __ttm_read_lock(struct ttm_lock *lock)
66{
67 bool locked = false;
68
69 spin_lock(&lock->lock);
70 if (unlikely(lock->kill_takers)) {
71 send_sig(lock->signal, current, 0);
72 spin_unlock(&lock->lock);
73 return false;
74 }
75 if (lock->rw >= 0 && lock->flags == 0) {
76 ++lock->rw;
77 locked = true;
78 }
79 spin_unlock(&lock->lock);
80 return locked;
81}
82
83int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
84{
85 int ret = 0;
86
87 if (interruptible)
88 ret = wait_event_interruptible(lock->queue,
89 __ttm_read_lock(lock));
90 else
91 wait_event(lock->queue, __ttm_read_lock(lock));
92 return ret;
93}
94EXPORT_SYMBOL(ttm_read_lock);
95
96static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
97{
98 bool block = true;
99
100 *locked = false;
101
102 spin_lock(&lock->lock);
103 if (unlikely(lock->kill_takers)) {
104 send_sig(lock->signal, current, 0);
105 spin_unlock(&lock->lock);
106 return false;
107 }
108 if (lock->rw >= 0 && lock->flags == 0) {
109 ++lock->rw;
110 block = false;
111 *locked = true;
112 } else if (lock->flags == 0) {
113 block = false;
114 }
115 spin_unlock(&lock->lock);
116
117 return !block;
118}
119
120int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
121{
122 int ret = 0;
123 bool locked;
124
125 if (interruptible)
126 ret = wait_event_interruptible
127 (lock->queue, __ttm_read_trylock(lock, &locked));
128 else
129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
130
131 if (unlikely(ret != 0)) {
132 BUG_ON(locked);
133 return ret;
134 }
135
136 return (locked) ? 0 : -EBUSY;
137}
138
139void ttm_write_unlock(struct ttm_lock *lock)
140{
141 spin_lock(&lock->lock);
142 lock->rw = 0;
143 wake_up_all(&lock->queue);
144 spin_unlock(&lock->lock);
145}
146EXPORT_SYMBOL(ttm_write_unlock);
147
148static bool __ttm_write_lock(struct ttm_lock *lock)
149{
150 bool locked = false;
151
152 spin_lock(&lock->lock);
153 if (unlikely(lock->kill_takers)) {
154 send_sig(lock->signal, current, 0);
155 spin_unlock(&lock->lock);
156 return false;
157 }
158 if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
159 lock->rw = -1;
160 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
161 locked = true;
162 } else {
163 lock->flags |= TTM_WRITE_LOCK_PENDING;
164 }
165 spin_unlock(&lock->lock);
166 return locked;
167}
168
169int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
170{
171 int ret = 0;
172
173 if (interruptible) {
174 ret = wait_event_interruptible(lock->queue,
175 __ttm_write_lock(lock));
176 if (unlikely(ret != 0)) {
177 spin_lock(&lock->lock);
178 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
179 wake_up_all(&lock->queue);
180 spin_unlock(&lock->lock);
181 }
182 } else
183		wait_event(lock->queue, __ttm_write_lock(lock));
184
185 return ret;
186}
187EXPORT_SYMBOL(ttm_write_lock);
188
189void ttm_write_lock_downgrade(struct ttm_lock *lock)
190{
191 spin_lock(&lock->lock);
192 lock->rw = 1;
193 wake_up_all(&lock->queue);
194 spin_unlock(&lock->lock);
195}
196
197static int __ttm_vt_unlock(struct ttm_lock *lock)
198{
199 int ret = 0;
200
201 spin_lock(&lock->lock);
202 if (unlikely(!(lock->flags & TTM_VT_LOCK)))
203 ret = -EINVAL;
204 lock->flags &= ~TTM_VT_LOCK;
205 wake_up_all(&lock->queue);
206 spin_unlock(&lock->lock);
207 printk(KERN_INFO TTM_PFX "vt unlock.\n");
208
209 return ret;
210}
211
212static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
213{
214 struct ttm_base_object *base = *p_base;
215 struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
216 int ret;
217
218 *p_base = NULL;
219 ret = __ttm_vt_unlock(lock);
220 BUG_ON(ret != 0);
221}
222
223static bool __ttm_vt_lock(struct ttm_lock *lock)
224{
225 bool locked = false;
226
227 spin_lock(&lock->lock);
228 if (lock->rw == 0) {
229 lock->flags &= ~TTM_VT_LOCK_PENDING;
230 lock->flags |= TTM_VT_LOCK;
231 locked = true;
232 } else {
233 lock->flags |= TTM_VT_LOCK_PENDING;
234 }
235 spin_unlock(&lock->lock);
236 return locked;
237}
238
239int ttm_vt_lock(struct ttm_lock *lock,
240 bool interruptible,
241 struct ttm_object_file *tfile)
242{
243 int ret = 0;
244
245 if (interruptible) {
246 ret = wait_event_interruptible(lock->queue,
247 __ttm_vt_lock(lock));
248 if (unlikely(ret != 0)) {
249 spin_lock(&lock->lock);
250 lock->flags &= ~TTM_VT_LOCK_PENDING;
251 wake_up_all(&lock->queue);
252 spin_unlock(&lock->lock);
253 return ret;
254 }
255 } else
256 wait_event(lock->queue, __ttm_vt_lock(lock));
257
258 /*
259 * Add a base-object, the destructor of which will
260 * make sure the lock is released if the client dies
261 * while holding it.
262 */
263
264 ret = ttm_base_object_init(tfile, &lock->base, false,
265 ttm_lock_type, &ttm_vt_lock_remove, NULL);
266 if (ret)
267 (void)__ttm_vt_unlock(lock);
268 else {
269 lock->vt_holder = tfile;
270 printk(KERN_INFO TTM_PFX "vt lock.\n");
271 }
272
273 return ret;
274}
275EXPORT_SYMBOL(ttm_vt_lock);
276
277int ttm_vt_unlock(struct ttm_lock *lock)
278{
279 return ttm_ref_object_base_unref(lock->vt_holder,
280 lock->base.hash.key, TTM_REF_USAGE);
281}
282EXPORT_SYMBOL(ttm_vt_unlock);
283
284void ttm_suspend_unlock(struct ttm_lock *lock)
285{
286 spin_lock(&lock->lock);
287 lock->flags &= ~TTM_SUSPEND_LOCK;
288 wake_up_all(&lock->queue);
289 spin_unlock(&lock->lock);
290}
291
292static bool __ttm_suspend_lock(struct ttm_lock *lock)
293{
294 bool locked = false;
295
296 spin_lock(&lock->lock);
297 if (lock->rw == 0) {
298 lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
299 lock->flags |= TTM_SUSPEND_LOCK;
300 locked = true;
301 } else {
302 lock->flags |= TTM_SUSPEND_LOCK_PENDING;
303 }
304 spin_unlock(&lock->lock);
305 return locked;
306}
307
308void ttm_suspend_lock(struct ttm_lock *lock)
309{
310 wait_event(lock->queue, __ttm_suspend_lock(lock));
311}
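A sketch of the common case, taking the lock in read mode around command submission from an ioctl (error value and helper name illustrative):

	/* Illustrative: keep VT/suspend transitions out while submitting. */
	static int example_submit(struct ttm_lock *lock)
	{
		int ret;

		ret = ttm_read_lock(lock, true);  /* interruptible wait */
		if (unlikely(ret != 0))
			return ret;

		/* ... build and fire the command stream ... */

		ttm_read_unlock(lock);
		return 0;
	}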
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 072c281a6bb5..f5245c02b8fd 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -274,16 +274,17 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
 static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
 				     const struct sysinfo *si)
 {
-	struct ttm_mem_zone *zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	struct ttm_mem_zone *zone;
 	uint64_t mem;
 	int ret;
 
-	if (unlikely(!zone))
-		return -ENOMEM;
-
 	if (si->totalhigh == 0)
 		return 0;
 
+	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
+	if (unlikely(!zone))
+		return -ENOMEM;
+
 	mem = si->totalram;
 	mem *= si->mem_unit;
 
@@ -322,8 +323,10 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
 	 * No special dma32 zone needed.
 	 */
 
-	if (mem <= ((uint64_t) 1ULL << 32))
+	if (mem <= ((uint64_t) 1ULL << 32)) {
+		kfree(zone);
 		return 0;
+	}
 
 	/*
 	 * Limit max dma32 memory to 4GB for now
@@ -460,6 +463,7 @@ void ttm_mem_global_free(struct ttm_mem_global *glob,
 {
 	return ttm_mem_global_free_zone(glob, NULL, amount);
 }
+EXPORT_SYMBOL(ttm_mem_global_free);
 
 static int ttm_mem_global_reserve(struct ttm_mem_global *glob,
 				  struct ttm_mem_zone *single_zone,
@@ -533,6 +537,7 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
 	return ttm_mem_global_alloc_zone(glob, NULL, memory, no_wait,
 					 interruptible);
 }
+EXPORT_SYMBOL(ttm_mem_global_alloc);
 
 int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
 			      struct page *page,
@@ -588,3 +593,4 @@ size_t ttm_round_pot(size_t size)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(ttm_round_pot);
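With alloc/free exported, a driver can charge its own per-object overhead to the global zones before allocating; a sketch of the pairing (helper name illustrative):

	/* Illustrative: account size bytes against the TTM zones, then
	 * allocate; unaccount on failure so the books stay balanced. */
	static void *example_alloc_accounted(struct ttm_mem_global *glob,
					     size_t size)
	{
		void *obj;

		if (ttm_mem_global_alloc(glob, size, false, false))
			return NULL;

		obj = kzalloc(size, GFP_KERNEL);
		if (!obj)
			ttm_mem_global_free(glob, size);
		return obj;
	}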
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
new file mode 100644
index 000000000000..1099abac824b
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -0,0 +1,452 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/** @file ttm_ref_object.c
31 *
32 * Base- and reference object implementation for the various
33 * ttm objects. Implements reference counting, minimal security checks
34 * and release on file close.
35 */
36
37/**
38 * struct ttm_object_file
39 *
40 * @tdev: Pointer to the ttm_object_device.
41 *
42 * @lock: Lock that protects the ref_list list and the
43 * ref_hash hash tables.
44 *
45 * @ref_list: List of ttm_ref_objects to be destroyed at
46 * file release.
47 *
48 * @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
49 * for fast lookup of ref objects given a base object.
50 */
51
52#include "ttm/ttm_object.h"
53#include "ttm/ttm_module.h"
54#include <linux/list.h>
55#include <linux/spinlock.h>
56#include <linux/slab.h>
57#include <linux/module.h>
58#include <asm/atomic.h>
59
60struct ttm_object_file {
61 struct ttm_object_device *tdev;
62 rwlock_t lock;
63 struct list_head ref_list;
64 struct drm_open_hash ref_hash[TTM_REF_NUM];
65 struct kref refcount;
66};
67
68/**
69 * struct ttm_object_device
70 *
71 * @object_lock: lock that protects the object_hash hash table.
72 *
73 * @object_hash: hash table for fast lookup of object global names.
74 *
75 * @object_count: Per device object count.
76 *
77 * This is the per-device data structure needed for ttm object management.
78 */
79
80struct ttm_object_device {
81 rwlock_t object_lock;
82 struct drm_open_hash object_hash;
83 atomic_t object_count;
84 struct ttm_mem_global *mem_glob;
85};
86
87/**
88 * struct ttm_ref_object
89 *
90 * @hash: Hash entry for the per-file object reference hash.
91 *
92 * @head: List entry for the per-file list of ref-objects.
93 *
94 * @kref: Ref count.
95 *
96 * @obj: Base object this ref object is referencing.
97 *
98 * @ref_type: Type of ref object.
99 *
100 * This is similar to an idr object, but it also has a hash table entry
101 * that allows lookup with a pointer to the referenced object as a key. In
102 * that way, one can easily detect whether a base object is referenced by
103 * a particular ttm_object_file. It also carries a ref count to avoid creating
104 * multiple ref objects if a ttm_object_file references the same base
105 * object more than once.
106 */
107
108struct ttm_ref_object {
109 struct drm_hash_item hash;
110 struct list_head head;
111 struct kref kref;
112 struct ttm_base_object *obj;
113 enum ttm_ref_type ref_type;
114 struct ttm_object_file *tfile;
115};
116
117static inline struct ttm_object_file *
118ttm_object_file_ref(struct ttm_object_file *tfile)
119{
120 kref_get(&tfile->refcount);
121 return tfile;
122}
123
124static void ttm_object_file_destroy(struct kref *kref)
125{
126 struct ttm_object_file *tfile =
127 container_of(kref, struct ttm_object_file, refcount);
128
129 kfree(tfile);
130}
131
132
133static inline void ttm_object_file_unref(struct ttm_object_file **p_tfile)
134{
135 struct ttm_object_file *tfile = *p_tfile;
136
137 *p_tfile = NULL;
138 kref_put(&tfile->refcount, ttm_object_file_destroy);
139}
140
141
142int ttm_base_object_init(struct ttm_object_file *tfile,
143 struct ttm_base_object *base,
144 bool shareable,
145 enum ttm_object_type object_type,
146 void (*refcount_release) (struct ttm_base_object **),
147 void (*ref_obj_release) (struct ttm_base_object *,
148 enum ttm_ref_type ref_type))
149{
150 struct ttm_object_device *tdev = tfile->tdev;
151 int ret;
152
153 base->shareable = shareable;
154 base->tfile = ttm_object_file_ref(tfile);
155 base->refcount_release = refcount_release;
156 base->ref_obj_release = ref_obj_release;
157 base->object_type = object_type;
158 write_lock(&tdev->object_lock);
159 kref_init(&base->refcount);
160 ret = drm_ht_just_insert_please(&tdev->object_hash,
161 &base->hash,
162 (unsigned long)base, 31, 0, 0);
163 write_unlock(&tdev->object_lock);
164 if (unlikely(ret != 0))
165 goto out_err0;
166
167 ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
168 if (unlikely(ret != 0))
169 goto out_err1;
170
171 ttm_base_object_unref(&base);
172
173 return 0;
174out_err1:
175 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
176out_err0:
177 return ret;
178}
179EXPORT_SYMBOL(ttm_base_object_init);
180
181static void ttm_release_base(struct kref *kref)
182{
183 struct ttm_base_object *base =
184 container_of(kref, struct ttm_base_object, refcount);
185 struct ttm_object_device *tdev = base->tfile->tdev;
186
187 (void)drm_ht_remove_item(&tdev->object_hash, &base->hash);
188 write_unlock(&tdev->object_lock);
189 if (base->refcount_release) {
190 ttm_object_file_unref(&base->tfile);
191 base->refcount_release(&base);
192 }
193 write_lock(&tdev->object_lock);
194}
195
196void ttm_base_object_unref(struct ttm_base_object **p_base)
197{
198 struct ttm_base_object *base = *p_base;
199 struct ttm_object_device *tdev = base->tfile->tdev;
200
201 *p_base = NULL;
202
203 /*
204 * Need to take the lock here to avoid racing with
205 * users trying to look up the object.
206 */
207
208 write_lock(&tdev->object_lock);
209 (void)kref_put(&base->refcount, &ttm_release_base);
210 write_unlock(&tdev->object_lock);
211}
212EXPORT_SYMBOL(ttm_base_object_unref);
213
214struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
215 uint32_t key)
216{
217 struct ttm_object_device *tdev = tfile->tdev;
218 struct ttm_base_object *base;
219 struct drm_hash_item *hash;
220 int ret;
221
222 read_lock(&tdev->object_lock);
223 ret = drm_ht_find_item(&tdev->object_hash, key, &hash);
224
225 if (likely(ret == 0)) {
226 base = drm_hash_entry(hash, struct ttm_base_object, hash);
227 kref_get(&base->refcount);
228 }
229 read_unlock(&tdev->object_lock);
230
231 if (unlikely(ret != 0))
232 return NULL;
233
234 if (tfile != base->tfile && !base->shareable) {
235 printk(KERN_ERR TTM_PFX
236 "Attempted access of non-shareable object.\n");
237 ttm_base_object_unref(&base);
238 return NULL;
239 }
240
241 return base;
242}
243EXPORT_SYMBOL(ttm_base_object_lookup);
244
245int ttm_ref_object_add(struct ttm_object_file *tfile,
246 struct ttm_base_object *base,
247 enum ttm_ref_type ref_type, bool *existed)
248{
249 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
250 struct ttm_ref_object *ref;
251 struct drm_hash_item *hash;
252 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
253 int ret = -EINVAL;
254
255 if (existed != NULL)
256 *existed = true;
257
258 while (ret == -EINVAL) {
259 read_lock(&tfile->lock);
260 ret = drm_ht_find_item(ht, base->hash.key, &hash);
261
262 if (ret == 0) {
263 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
264 kref_get(&ref->kref);
265 read_unlock(&tfile->lock);
266 break;
267 }
268
269 read_unlock(&tfile->lock);
270 ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
271 false, false);
272 if (unlikely(ret != 0))
273 return ret;
274 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
275 if (unlikely(ref == NULL)) {
276 ttm_mem_global_free(mem_glob, sizeof(*ref));
277 return -ENOMEM;
278 }
279
280 ref->hash.key = base->hash.key;
281 ref->obj = base;
282 ref->tfile = tfile;
283 ref->ref_type = ref_type;
284 kref_init(&ref->kref);
285
286 write_lock(&tfile->lock);
287 ret = drm_ht_insert_item(ht, &ref->hash);
288
289 if (likely(ret == 0)) {
290 list_add_tail(&ref->head, &tfile->ref_list);
291 kref_get(&base->refcount);
292 write_unlock(&tfile->lock);
293 if (existed != NULL)
294 *existed = false;
295 break;
296 }
297
298 write_unlock(&tfile->lock);
299 BUG_ON(ret != -EINVAL);
300
301 ttm_mem_global_free(mem_glob, sizeof(*ref));
302 kfree(ref);
303 }
304
305 return ret;
306}
307EXPORT_SYMBOL(ttm_ref_object_add);
308
309static void ttm_ref_object_release(struct kref *kref)
310{
311 struct ttm_ref_object *ref =
312 container_of(kref, struct ttm_ref_object, kref);
313 struct ttm_base_object *base = ref->obj;
314 struct ttm_object_file *tfile = ref->tfile;
315 struct drm_open_hash *ht;
316 struct ttm_mem_global *mem_glob = tfile->tdev->mem_glob;
317
318 ht = &tfile->ref_hash[ref->ref_type];
319 (void)drm_ht_remove_item(ht, &ref->hash);
320 list_del(&ref->head);
321 write_unlock(&tfile->lock);
322
323 if (ref->ref_type != TTM_REF_USAGE && base->ref_obj_release)
324 base->ref_obj_release(base, ref->ref_type);
325
326 ttm_base_object_unref(&ref->obj);
327 ttm_mem_global_free(mem_glob, sizeof(*ref));
328 kfree(ref);
329 write_lock(&tfile->lock);
330}
331
332int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
333 unsigned long key, enum ttm_ref_type ref_type)
334{
335 struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
336 struct ttm_ref_object *ref;
337 struct drm_hash_item *hash;
338 int ret;
339
340 write_lock(&tfile->lock);
341 ret = drm_ht_find_item(ht, key, &hash);
342 if (unlikely(ret != 0)) {
343 write_unlock(&tfile->lock);
344 return -EINVAL;
345 }
346 ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
347 kref_put(&ref->kref, ttm_ref_object_release);
348 write_unlock(&tfile->lock);
349 return 0;
350}
351EXPORT_SYMBOL(ttm_ref_object_base_unref);
352
353void ttm_object_file_release(struct ttm_object_file **p_tfile)
354{
355 struct ttm_ref_object *ref;
356 struct list_head *list;
357 unsigned int i;
358 struct ttm_object_file *tfile = *p_tfile;
359
360 *p_tfile = NULL;
361 write_lock(&tfile->lock);
362
363 /*
364 * Since we release the lock within the loop, we have to
365 * restart it from the beginning each time.
366 */
367
368 while (!list_empty(&tfile->ref_list)) {
369 list = tfile->ref_list.next;
370 ref = list_entry(list, struct ttm_ref_object, head);
371 ttm_ref_object_release(&ref->kref);
372 }
373
374 for (i = 0; i < TTM_REF_NUM; ++i)
375 drm_ht_remove(&tfile->ref_hash[i]);
376
377 write_unlock(&tfile->lock);
378 ttm_object_file_unref(&tfile);
379}
380EXPORT_SYMBOL(ttm_object_file_release);
381
382struct ttm_object_file *ttm_object_file_init(struct ttm_object_device *tdev,
383 unsigned int hash_order)
384{
385 struct ttm_object_file *tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
386 unsigned int i;
387 unsigned int j = 0;
388 int ret;
389
390 if (unlikely(tfile == NULL))
391 return NULL;
392
393 rwlock_init(&tfile->lock);
394 tfile->tdev = tdev;
395 kref_init(&tfile->refcount);
396 INIT_LIST_HEAD(&tfile->ref_list);
397
398 for (i = 0; i < TTM_REF_NUM; ++i) {
399 ret = drm_ht_create(&tfile->ref_hash[i], hash_order);
400 if (ret) {
401 j = i;
402 goto out_err;
403 }
404 }
405
406 return tfile;
407out_err:
408 for (i = 0; i < j; ++i)
409 drm_ht_remove(&tfile->ref_hash[i]);
410
411 kfree(tfile);
412
413 return NULL;
414}
415EXPORT_SYMBOL(ttm_object_file_init);
416
417struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
418 *mem_glob,
419 unsigned int hash_order)
420{
421 struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
422 int ret;
423
424 if (unlikely(tdev == NULL))
425 return NULL;
426
427 tdev->mem_glob = mem_glob;
428 rwlock_init(&tdev->object_lock);
429 atomic_set(&tdev->object_count, 0);
430 ret = drm_ht_create(&tdev->object_hash, hash_order);
431
432 if (likely(ret == 0))
433 return tdev;
434
435 kfree(tdev);
436 return NULL;
437}
438EXPORT_SYMBOL(ttm_object_device_init);
439
440void ttm_object_device_release(struct ttm_object_device **p_tdev)
441{
442 struct ttm_object_device *tdev = *p_tdev;
443
444 *p_tdev = NULL;
445
446 write_lock(&tdev->object_lock);
447 drm_ht_remove(&tdev->object_hash);
448 write_unlock(&tdev->object_lock);
449
450 kfree(tdev);
451}
452EXPORT_SYMBOL(ttm_object_device_release);
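A sketch of the intended lifecycle from a driver's side: initialize a base object (optionally shareable), publish its hash key to userspace, and let other clients take references by looking the key up. The release callback is driver-supplied; ttm_driver_type0 is assumed to be one of the driver-reserved types in ttm_object.h:

	/* Illustrative: share a base object, then reference it from
	 * another client by its global name. */
	static int example_publish(struct ttm_object_file *tfile,
				   struct ttm_base_object *base,
				   void (*release)(struct ttm_base_object **))
	{
		/* Key for userspace is base->hash.key on success. */
		return ttm_base_object_init(tfile, base, true /* shareable */,
					    ttm_driver_type0, release, NULL);
	}

	static void example_use(struct ttm_object_file *other, uint32_t key)
	{
		struct ttm_base_object *base =
			ttm_base_object_lookup(other, key);

		if (!base)
			return;	/* unknown key or non-shareable object */
		/* ... use the object ... */
		ttm_base_object_unref(&base);
	}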
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7bcb89f39ce8..9c2b1cc5dba5 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -192,6 +192,7 @@ int ttm_tt_populate(struct ttm_tt *ttm)
 	ttm->state = tt_unbound;
 	return 0;
 }
+EXPORT_SYMBOL(ttm_tt_populate);
 
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 0258289f3b3e..e2997a8d5e1b 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -1253,10 +1253,9 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct hid_device *hid = usb_get_intfdata(intf);
 	struct usbhid_device *usbhid = hid->driver_data;
-	struct usb_device *udev = interface_to_usbdev(intf);
 	int status;
 
-	if (udev->auto_pm) {
+	if (message.event & PM_EVENT_AUTO) {
 		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
 		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
 		    && !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
@@ -1281,7 +1280,7 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 		return -EIO;
 	}
 
-	if (!ignoreled && udev->auto_pm) {
+	if (!ignoreled && (message.event & PM_EVENT_AUTO)) {
 		spin_lock_irq(&usbhid->lock);
 		if (test_bit(HID_LED_ON, &usbhid->iofl)) {
 			spin_unlock_irq(&usbhid->lock);
@@ -1294,7 +1293,8 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
 	hid_cancel_delayed_stuff(usbhid);
 	hid_cease_io(usbhid);
 
-	if (udev->auto_pm && test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
+	if ((message.event & PM_EVENT_AUTO) &&
+	    test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
 		/* lost race against keypresses */
 		status = hid_start_in(hid);
 		if (status < 0)
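The idiom generalizes to any USB driver that used udev->auto_pm: the suspend message itself now says whether this is runtime autosuspend. A self-contained sketch (the device structure and busy flag are hypothetical):

	/* Illustrative USB suspend callback distinguishing autosuspend
	 * from system sleep via the pm_message_t. */
	struct example_dev {
		int io_pending;
	};

	static int example_suspend(struct usb_interface *intf,
				   pm_message_t message)
	{
		struct example_dev *dev = usb_get_intfdata(intf);

		/* Autosuspend may be refused; system sleep must not fail. */
		if ((message.event & PM_EVENT_AUTO) && dev->io_pending)
			return -EBUSY;

		/* ... quiesce I/O for either kind of suspend ... */
		return 0;
	}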
diff --git a/drivers/input/xen-kbdfront.c b/drivers/input/xen-kbdfront.c
index b115726dc088..c721c0a23eb8 100644
--- a/drivers/input/xen-kbdfront.c
+++ b/drivers/input/xen-kbdfront.c
@@ -21,7 +21,10 @@
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/input.h>
+
 #include <asm/xen/hypervisor.h>
+
+#include <xen/xen.h>
 #include <xen/events.h>
 #include <xen/page.h>
 #include <xen/interface/io/fbif.h>
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index e4ff50b95a5e..fcb6ec1af173 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -712,6 +712,12 @@ static int run_simple_test(int is_get_char, int chr)
 
 	/* End of packet == #XX so look for the '#' */
 	if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
+		if (put_buf_cnt >= BUFMAX) {
+			eprintk("kgdbts: ERROR: put buffer overflow on"
+				" '%s' line %i\n", ts.name, ts.idx);
+			put_buf_cnt = 0;
+			return 0;
+		}
 		put_buf[put_buf_cnt] = '\0';
 		v2printk("put%i: %s\n", ts.idx, put_buf);
 		/* Trigger check here */
@@ -885,16 +891,16 @@ static void kgdbts_run_tests(void)
 	int nmi_sleep = 0;
 	int i;
 
-	ptr = strstr(config, "F");
+	ptr = strchr(config, 'F');
 	if (ptr)
 		fork_test = simple_strtol(ptr + 1, NULL, 10);
-	ptr = strstr(config, "S");
+	ptr = strchr(config, 'S');
 	if (ptr)
 		do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
-	ptr = strstr(config, "N");
+	ptr = strchr(config, 'N');
 	if (ptr)
 		nmi_sleep = simple_strtol(ptr+1, NULL, 10);
-	ptr = strstr(config, "I");
+	ptr = strchr(config, 'I');
 	if (ptr)
 		sstep_test = simple_strtol(ptr+1, NULL, 10);
 
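strchr() is the natural primitive once the token is a single character; a self-contained sketch of the parsing idiom the test module now uses, generalized (function and string names illustrative):

	/* Illustrative: pull the number following a single-character key
	 * out of a config string such as "F1N4I500". */
	static long example_get_opt(const char *config, char key)
	{
		char *ptr = strchr(config, key);

		return ptr ? simple_strtol(ptr + 1, NULL, 10) : 0;
	}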
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 47e84ef355c5..3b48681f8a0d 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -579,7 +579,7 @@ void i2400mu_disconnect(struct usb_interface *iface)
  *
  * As well, the device might refuse going to sleep for whichever
  * reason. In this case we just fail. For system suspend/hibernate,
- * we *can't* fail. We look at usb_dev->auto_pm to see if the
+ * we *can't* fail. We check PM_EVENT_AUTO to see if the
  * suspend call comes from the USB stack or from the system and act
  * in consequence.
  *
@@ -591,14 +591,11 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
 	int result = 0;
 	struct device *dev = &iface->dev;
 	struct i2400mu *i2400mu = usb_get_intfdata(iface);
-#ifdef CONFIG_PM
-	struct usb_device *usb_dev = i2400mu->usb_dev;
-#endif
 	unsigned is_autosuspend = 0;
 	struct i2400m *i2400m = &i2400mu->i2400m;
 
 #ifdef CONFIG_PM
-	if (usb_dev->auto_pm > 0)
+	if (pm_msg.event & PM_EVENT_AUTO)
 		is_autosuspend = 1;
 #endif
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index baa051d5bfbe..a869b45d3d37 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -42,6 +42,7 @@
 #include <linux/mm.h>
 #include <net/ip.h>
 
+#include <xen/xen.h>
 #include <xen/xenbus.h>
 #include <xen/events.h>
 #include <xen/page.h>
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index fdc864f9cf23..b1ecefa2a23d 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -27,10 +27,10 @@ config PCI_LEGACY
 	default y
 	help
 	  Say Y here if you want to include support for the deprecated
-	  pci_find_slot() and pci_find_device() APIs. Most drivers have
-	  been converted over to using the proper hotplug APIs, so this
-	  option serves to include/exclude only a few drivers that are
-	  still using this API.
+	  pci_find_device() API. Most drivers have been converted over
+	  to using the proper hotplug APIs, so this option serves to
+	  include/exclude only a few drivers that are still using this
+	  API.
 
 config PCI_DEBUG
 	bool "PCI Debugging"
@@ -69,3 +69,10 @@ config PCI_IOV
 	  physical resources.
 
 	  If unsure, say N.
+
+config PCI_IOAPIC
+	bool
+	depends on PCI
+	depends on ACPI
+	depends on HOTPLUG
+	default y
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 4a7f11d8f432..4df48d58eaa6 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -14,6 +14,8 @@ CFLAGS_legacy.o += -Wno-deprecated-declarations
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
 
+obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
+
 obj-$(CONFIG_HOTPLUG) += hotplug.o
 
 # Build the PCI Hotplug drivers if we were asked to
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 416f6ac65b76..6cdc931f7c17 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -320,7 +320,7 @@ found:
 	for (bus = dev->bus; bus; bus = bus->parent) {
 		struct pci_dev *bridge = bus->self;
 
-		if (!bridge || !bridge->is_pcie ||
+		if (!bridge || !pci_is_pcie(bridge) ||
 		    bridge->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
 			return 0;
 
@@ -645,8 +645,11 @@ void __init detect_intel_iommu(void)
645 "x2apic and Intr-remapping.\n"); 645 "x2apic and Intr-remapping.\n");
646#endif 646#endif
647#ifdef CONFIG_DMAR 647#ifdef CONFIG_DMAR
648 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) 648 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
649 iommu_detected = 1; 649 iommu_detected = 1;
650 /* Make sure ACS will be enabled */
651 pci_request_acs();
652 }
650#endif 653#endif
651#ifdef CONFIG_X86 654#ifdef CONFIG_X86
652 if (ret) 655 if (ret)
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3625b094bf7e..6cd9f3c9887d 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -6,18 +6,22 @@ obj-$(CONFIG_HOTPLUG_PCI) += pci_hotplug.o
 obj-$(CONFIG_HOTPLUG_PCI_COMPAQ) += cpqphp.o
 obj-$(CONFIG_HOTPLUG_PCI_IBM) += ibmphp.o
 
-# pciehp should be linked before acpiphp in order to allow the native driver
-# to attempt to bind first. We can then fall back to generic support.
+# native drivers should be linked before acpiphp in order to allow the
+# native driver to attempt to bind first. We can then fall back to
+# generic support.
 
 obj-$(CONFIG_HOTPLUG_PCI_PCIE) += pciehp.o
-obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
-obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
 obj-$(CONFIG_HOTPLUG_PCI_CPCI_ZT5550) += cpcihp_zt5550.o
 obj-$(CONFIG_HOTPLUG_PCI_CPCI_GENERIC) += cpcihp_generic.o
 obj-$(CONFIG_HOTPLUG_PCI_SHPC) += shpchp.o
 obj-$(CONFIG_HOTPLUG_PCI_RPA) += rpaphp.o
 obj-$(CONFIG_HOTPLUG_PCI_RPA_DLPAR) += rpadlpar_io.o
 obj-$(CONFIG_HOTPLUG_PCI_SGI) += sgi_hotplug.o
+obj-$(CONFIG_HOTPLUG_PCI_ACPI) += acpiphp.o
+
+# acpiphp_ibm extends acpiphp, so should be linked afterwards.
+
+obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM) += acpiphp_ibm.o
 
 # Link this last so it doesn't claim devices that have a real hotplug driver
 obj-$(CONFIG_HOTPLUG_PCI_FAKE) += fakephp.o
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 0f32571b94df..3c76fc67cf0e 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -362,6 +362,8 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 		status = acpi_pci_osc_control_set(handle, flags);
 		if (ACPI_SUCCESS(status))
 			goto got_one;
+		if (status == AE_SUPPORT)
+			goto no_control;
 		kfree(string.pointer);
 		string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL };
 	}
@@ -394,10 +396,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev, u32 flags)
 		if (ACPI_FAILURE(status))
 			break;
 	}
-
+no_control:
 	dbg("Cannot get control of hotplug hardware for pci %s\n",
 	    pci_name(pdev));
-
 	kfree(string.pointer);
 	return -ENODEV;
 got_one:
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 7d938df79206..bab52047baa8 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -146,12 +146,6 @@ struct acpiphp_attention_info
 	struct module *owner;
 };
 
-struct acpiphp_ioapic {
-	struct pci_dev *dev;
-	u32 gsi_base;
-	struct list_head list;
-};
-
 /* PCI bus bridge HID */
 #define ACPI_PCI_HOST_HID		"PNP0A03"
 
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index df1b0ea089d1..8e952fdab764 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -52,8 +52,6 @@
52#include "acpiphp.h" 52#include "acpiphp.h"
53 53
54static LIST_HEAD(bridge_list); 54static LIST_HEAD(bridge_list);
55static LIST_HEAD(ioapic_list);
56static DEFINE_SPINLOCK(ioapic_list_lock);
57 55
58#define MY_NAME "acpiphp_glue" 56#define MY_NAME "acpiphp_glue"
59 57
@@ -311,17 +309,13 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
311/* find acpiphp_func from acpiphp_bridge */ 309/* find acpiphp_func from acpiphp_bridge */
312static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle) 310static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
313{ 311{
314 struct list_head *node, *l;
315 struct acpiphp_bridge *bridge; 312 struct acpiphp_bridge *bridge;
316 struct acpiphp_slot *slot; 313 struct acpiphp_slot *slot;
317 struct acpiphp_func *func; 314 struct acpiphp_func *func;
318 315
319 list_for_each(node, &bridge_list) { 316 list_for_each_entry(bridge, &bridge_list, list) {
320 bridge = list_entry(node, struct acpiphp_bridge, list);
321 for (slot = bridge->slots; slot; slot = slot->next) { 317 for (slot = bridge->slots; slot; slot = slot->next) {
322 list_for_each(l, &slot->funcs) { 318 list_for_each_entry(func, &slot->funcs, sibling) {
323 func = list_entry(l, struct acpiphp_func,
324 sibling);
325 if (func->handle == handle) 319 if (func->handle == handle)
326 return func; 320 return func;
327 } 321 }
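Every conversion in this file follows the same pattern: an open-coded list_for_each() plus list_entry() pair collapses into list_for_each_entry(), whose cursor is already the containing structure, hiding the container_of() arithmetic. A generic illustration with a toy struct (item, item_list and use() are placeholders, not names from this driver):

	struct item {
		int value;
		struct list_head link;	/* embedded list node */
	};

	struct list_head *node;
	struct item *it;

	/* old style: iterate raw nodes, convert each by hand */
	list_for_each(node, &item_list) {
		it = list_entry(node, struct item, link);
		use(it->value);
	}

	/* new style: the cursor is the container itself */
	list_for_each_entry(it, &item_list, link)
		use(it->value);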
@@ -495,21 +489,19 @@ static int add_bridge(acpi_handle handle)
495 489
496static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle) 490static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
497{ 491{
498 struct list_head *head; 492 struct acpiphp_bridge *bridge;
499 list_for_each(head, &bridge_list) { 493
500 struct acpiphp_bridge *bridge = list_entry(head, 494 list_for_each_entry(bridge, &bridge_list, list)
501 struct acpiphp_bridge, list);
502 if (bridge->handle == handle) 495 if (bridge->handle == handle)
503 return bridge; 496 return bridge;
504 }
505 497
506 return NULL; 498 return NULL;
507} 499}
508 500
509static void cleanup_bridge(struct acpiphp_bridge *bridge) 501static void cleanup_bridge(struct acpiphp_bridge *bridge)
510{ 502{
511 struct list_head *list, *tmp; 503 struct acpiphp_slot *slot, *next;
512 struct acpiphp_slot *slot; 504 struct acpiphp_func *func, *tmp;
513 acpi_status status; 505 acpi_status status;
514 acpi_handle handle = bridge->handle; 506 acpi_handle handle = bridge->handle;
515 507
@@ -530,10 +522,8 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
530 522
531 slot = bridge->slots; 523 slot = bridge->slots;
532 while (slot) { 524 while (slot) {
533 struct acpiphp_slot *next = slot->next; 525 next = slot->next;
534 list_for_each_safe (list, tmp, &slot->funcs) { 526 list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
535 struct acpiphp_func *func;
536 func = list_entry(list, struct acpiphp_func, sibling);
537 if (is_dock_device(func->handle)) { 527 if (is_dock_device(func->handle)) {
538 unregister_hotplug_dock_device(func->handle); 528 unregister_hotplug_dock_device(func->handle);
539 unregister_dock_notifier(&func->nb); 529 unregister_dock_notifier(&func->nb);
@@ -545,7 +535,7 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
545 if (ACPI_FAILURE(status)) 535 if (ACPI_FAILURE(status))
546 err("failed to remove notify handler\n"); 536 err("failed to remove notify handler\n");
547 } 537 }
548 list_del(list); 538 list_del(&func->sibling);
549 kfree(func); 539 kfree(func);
550 } 540 }
551 acpiphp_unregister_hotplug_slot(slot); 541 acpiphp_unregister_hotplug_slot(slot);
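cleanup_bridge() frees entries while iterating, which is why it uses the _safe variant: the extra cursor (tmp above) caches the next node before the loop body runs, so list_del() plus kfree() on the current entry cannot corrupt the traversal. With the same toy struct as above:

	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, &item_list, link) {
		list_del(&it->link);	/* safe: tmp already holds the next node */
		kfree(it);
	}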
@@ -606,204 +596,17 @@ static void remove_bridge(acpi_handle handle)
606 handle_hotplug_event_bridge); 596 handle_hotplug_event_bridge);
607} 597}
608 598
609static struct pci_dev * get_apic_pci_info(acpi_handle handle)
610{
611 struct pci_dev *dev;
612
613 dev = acpi_get_pci_dev(handle);
614 if (!dev)
615 return NULL;
616
617 if ((dev->class != PCI_CLASS_SYSTEM_PIC_IOAPIC) &&
618 (dev->class != PCI_CLASS_SYSTEM_PIC_IOXAPIC))
619 {
620 pci_dev_put(dev);
621 return NULL;
622 }
623
624 return dev;
625}
626
627static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
628{
629 acpi_status status;
630 int result = -1;
631 unsigned long long gsb;
632 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
633 union acpi_object *obj;
634 void *table;
635
636 status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
637 if (ACPI_SUCCESS(status)) {
638 *gsi_base = (u32)gsb;
639 return 0;
640 }
641
642 status = acpi_evaluate_object(handle, "_MAT", NULL, &buffer);
643 if (ACPI_FAILURE(status) || !buffer.length || !buffer.pointer)
644 return -1;
645
646 obj = buffer.pointer;
647 if (obj->type != ACPI_TYPE_BUFFER)
648 goto out;
649
650 table = obj->buffer.pointer;
651 switch (((struct acpi_subtable_header *)table)->type) {
652 case ACPI_MADT_TYPE_IO_SAPIC:
653 *gsi_base = ((struct acpi_madt_io_sapic *)table)->global_irq_base;
654 result = 0;
655 break;
656 case ACPI_MADT_TYPE_IO_APIC:
657 *gsi_base = ((struct acpi_madt_io_apic *)table)->global_irq_base;
658 result = 0;
659 break;
660 default:
661 break;
662 }
663 out:
664 kfree(buffer.pointer);
665 return result;
666}
667
668static acpi_status
669ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
670{
671 acpi_status status;
672 unsigned long long sta;
673 acpi_handle tmp;
674 struct pci_dev *pdev;
675 u32 gsi_base;
676 u64 phys_addr;
677 struct acpiphp_ioapic *ioapic;
678
679 /* Evaluate _STA if present */
680 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
681 if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
682 return AE_CTRL_DEPTH;
683
684 /* Scan only PCI bus scope */
685 status = acpi_get_handle(handle, "_HID", &tmp);
686 if (ACPI_SUCCESS(status))
687 return AE_CTRL_DEPTH;
688
689 if (get_gsi_base(handle, &gsi_base))
690 return AE_OK;
691
692 ioapic = kmalloc(sizeof(*ioapic), GFP_KERNEL);
693 if (!ioapic)
694 return AE_NO_MEMORY;
695
696 pdev = get_apic_pci_info(handle);
697 if (!pdev)
698 goto exit_kfree;
699
700 if (pci_enable_device(pdev))
701 goto exit_pci_dev_put;
702
703 pci_set_master(pdev);
704
705 if (pci_request_region(pdev, 0, "I/O APIC(acpiphp)"))
706 goto exit_pci_disable_device;
707
708 phys_addr = pci_resource_start(pdev, 0);
709 if (acpi_register_ioapic(handle, phys_addr, gsi_base))
710 goto exit_pci_release_region;
711
712 ioapic->gsi_base = gsi_base;
713 ioapic->dev = pdev;
714 spin_lock(&ioapic_list_lock);
715 list_add_tail(&ioapic->list, &ioapic_list);
716 spin_unlock(&ioapic_list_lock);
717
718 return AE_OK;
719
720 exit_pci_release_region:
721 pci_release_region(pdev, 0);
722 exit_pci_disable_device:
723 pci_disable_device(pdev);
724 exit_pci_dev_put:
725 pci_dev_put(pdev);
726 exit_kfree:
727 kfree(ioapic);
728
729 return AE_OK;
730}
731
732static acpi_status
733ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv)
734{
735 acpi_status status;
736 unsigned long long sta;
737 acpi_handle tmp;
738 u32 gsi_base;
739 struct acpiphp_ioapic *pos, *n, *ioapic = NULL;
740
741 /* Evaluate _STA if present */
742 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
743 if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
744 return AE_CTRL_DEPTH;
745
746 /* Scan only PCI bus scope */
747 status = acpi_get_handle(handle, "_HID", &tmp);
748 if (ACPI_SUCCESS(status))
749 return AE_CTRL_DEPTH;
750
751 if (get_gsi_base(handle, &gsi_base))
752 return AE_OK;
753
754 acpi_unregister_ioapic(handle, gsi_base);
755
756 spin_lock(&ioapic_list_lock);
757 list_for_each_entry_safe(pos, n, &ioapic_list, list) {
758 if (pos->gsi_base != gsi_base)
759 continue;
760 ioapic = pos;
761 list_del(&ioapic->list);
762 break;
763 }
764 spin_unlock(&ioapic_list_lock);
765
766 if (!ioapic)
767 return AE_OK;
768
769 pci_release_region(ioapic->dev, 0);
770 pci_disable_device(ioapic->dev);
771 pci_dev_put(ioapic->dev);
772 kfree(ioapic);
773
774 return AE_OK;
775}
776
777static int acpiphp_configure_ioapics(acpi_handle handle)
778{
779 ioapic_add(handle, 0, NULL, NULL);
780 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
781 ACPI_UINT32_MAX, ioapic_add, NULL, NULL, NULL);
782 return 0;
783}
784
785static int acpiphp_unconfigure_ioapics(acpi_handle handle)
786{
787 ioapic_remove(handle, 0, NULL, NULL);
788 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
789 ACPI_UINT32_MAX, ioapic_remove, NULL, NULL, NULL);
790 return 0;
791}
792
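All of this removed I/O APIC plumbing reappears below as a standalone PCI driver (drivers/pci/ioapic.c, new in this patch), so acpiphp no longer walks the namespace for I/O APICs itself. The removed ioapic_add() is also a textbook example of the goto-unwind idiom the new driver keeps: acquire resources in order, and on failure jump to a label that releases everything acquired so far, in reverse. A generic sketch (register_with_subsystem() and priv are placeholders, not real kernel names):

	ret = pci_enable_device(pdev);
	if (ret)
		goto err_free;
	if (pci_request_region(pdev, 0, "example"))
		goto err_disable;
	if (register_with_subsystem(pdev))	/* hypothetical final step */
		goto err_release;
	return 0;

err_release:
	pci_release_region(pdev, 0);
err_disable:
	pci_disable_device(pdev);
err_free:
	kfree(priv);
	return -ENODEV;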
793static int power_on_slot(struct acpiphp_slot *slot) 599static int power_on_slot(struct acpiphp_slot *slot)
794{ 600{
795 acpi_status status; 601 acpi_status status;
796 struct acpiphp_func *func; 602 struct acpiphp_func *func;
797 struct list_head *l;
798 int retval = 0; 603 int retval = 0;
799 604
800 /* if already enabled, just skip */ 605 /* if already enabled, just skip */
801 if (slot->flags & SLOT_POWEREDON) 606 if (slot->flags & SLOT_POWEREDON)
802 goto err_exit; 607 goto err_exit;
803 608
804 list_for_each (l, &slot->funcs) { 609 list_for_each_entry(func, &slot->funcs, sibling) {
805 func = list_entry(l, struct acpiphp_func, sibling);
806
807 if (func->flags & FUNC_HAS_PS0) { 610 if (func->flags & FUNC_HAS_PS0) {
808 dbg("%s: executing _PS0\n", __func__); 611 dbg("%s: executing _PS0\n", __func__);
809 status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL); 612 status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
@@ -829,7 +632,6 @@ static int power_off_slot(struct acpiphp_slot *slot)
829{ 632{
830 acpi_status status; 633 acpi_status status;
831 struct acpiphp_func *func; 634 struct acpiphp_func *func;
832 struct list_head *l;
833 635
834 int retval = 0; 636 int retval = 0;
835 637
@@ -837,9 +639,7 @@ static int power_off_slot(struct acpiphp_slot *slot)
837 if ((slot->flags & SLOT_POWEREDON) == 0) 639 if ((slot->flags & SLOT_POWEREDON) == 0)
838 goto err_exit; 640 goto err_exit;
839 641
840 list_for_each (l, &slot->funcs) { 642 list_for_each_entry(func, &slot->funcs, sibling) {
841 func = list_entry(l, struct acpiphp_func, sibling);
842
843 if (func->flags & FUNC_HAS_PS3) { 643 if (func->flags & FUNC_HAS_PS3) {
844 status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL); 644 status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
845 if (ACPI_FAILURE(status)) { 645 if (ACPI_FAILURE(status)) {
@@ -966,7 +766,6 @@ static int __ref enable_device(struct acpiphp_slot *slot)
966{ 766{
967 struct pci_dev *dev; 767 struct pci_dev *dev;
968 struct pci_bus *bus = slot->bridge->pci_bus; 768 struct pci_bus *bus = slot->bridge->pci_bus;
969 struct list_head *l;
970 struct acpiphp_func *func; 769 struct acpiphp_func *func;
971 int retval = 0; 770 int retval = 0;
972 int num, max, pass; 771 int num, max, pass;
@@ -1006,21 +805,16 @@ static int __ref enable_device(struct acpiphp_slot *slot)
1006 } 805 }
1007 } 806 }
1008 807
1009 list_for_each (l, &slot->funcs) { 808 list_for_each_entry(func, &slot->funcs, sibling)
1010 func = list_entry(l, struct acpiphp_func, sibling);
1011 acpiphp_bus_add(func); 809 acpiphp_bus_add(func);
1012 }
1013 810
1014 pci_bus_assign_resources(bus); 811 pci_bus_assign_resources(bus);
1015 acpiphp_sanitize_bus(bus); 812 acpiphp_sanitize_bus(bus);
1016 acpiphp_set_hpp_values(bus); 813 acpiphp_set_hpp_values(bus);
1017 list_for_each_entry(func, &slot->funcs, sibling)
1018 acpiphp_configure_ioapics(func->handle);
1019 pci_enable_bridges(bus); 814 pci_enable_bridges(bus);
1020 pci_bus_add_devices(bus); 815 pci_bus_add_devices(bus);
1021 816
1022 list_for_each (l, &slot->funcs) { 817 list_for_each_entry(func, &slot->funcs, sibling) {
1023 func = list_entry(l, struct acpiphp_func, sibling);
1024 dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 818 dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
1025 func->function)); 819 func->function));
1026 if (!dev) 820 if (!dev)
@@ -1091,7 +885,6 @@ static int disable_device(struct acpiphp_slot *slot)
1091 } 885 }
1092 886
1093 list_for_each_entry(func, &slot->funcs, sibling) { 887 list_for_each_entry(func, &slot->funcs, sibling) {
1094 acpiphp_unconfigure_ioapics(func->handle);
1095 acpiphp_bus_trim(func->handle); 888 acpiphp_bus_trim(func->handle);
1096 } 889 }
1097 890
@@ -1119,12 +912,9 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
1119 acpi_status status; 912 acpi_status status;
1120 unsigned long long sta = 0; 913 unsigned long long sta = 0;
1121 u32 dvid; 914 u32 dvid;
1122 struct list_head *l;
1123 struct acpiphp_func *func; 915 struct acpiphp_func *func;
1124 916
1125 list_for_each (l, &slot->funcs) { 917 list_for_each_entry(func, &slot->funcs, sibling) {
1126 func = list_entry(l, struct acpiphp_func, sibling);
1127
1128 if (func->flags & FUNC_HAS_STA) { 918 if (func->flags & FUNC_HAS_STA) {
1129 status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta); 919 status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta);
1130 if (ACPI_SUCCESS(status) && sta) 920 if (ACPI_SUCCESS(status) && sta)
@@ -1152,13 +942,10 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot)
1152{ 942{
1153 acpi_status status; 943 acpi_status status;
1154 struct acpiphp_func *func; 944 struct acpiphp_func *func;
1155 struct list_head *l;
1156 struct acpi_object_list arg_list; 945 struct acpi_object_list arg_list;
1157 union acpi_object arg; 946 union acpi_object arg;
1158 947
1159 list_for_each (l, &slot->funcs) { 948 list_for_each_entry(func, &slot->funcs, sibling) {
1160 func = list_entry(l, struct acpiphp_func, sibling);
1161
1162 /* We don't want to call _EJ0 on non-existing functions. */ 949 /* We don't want to call _EJ0 on non-existing functions. */
1163 if ((func->flags & FUNC_HAS_EJ0)) { 950 if ((func->flags & FUNC_HAS_EJ0)) {
1164 /* _EJ0 method takes one argument */ 951 /* _EJ0 method takes one argument */
@@ -1275,7 +1062,6 @@ static int acpiphp_configure_bridge (acpi_handle handle)
1275 acpiphp_sanitize_bus(bus); 1062 acpiphp_sanitize_bus(bus);
1276 acpiphp_set_hpp_values(bus); 1063 acpiphp_set_hpp_values(bus);
1277 pci_enable_bridges(bus); 1064 pci_enable_bridges(bus);
1278 acpiphp_configure_ioapics(handle);
1279 return 0; 1065 return 0;
1280} 1066}
1281 1067
@@ -1542,7 +1328,7 @@ int __init acpiphp_get_num_slots(void)
1542 struct acpiphp_bridge *bridge; 1328 struct acpiphp_bridge *bridge;
1543 int num_slots = 0; 1329 int num_slots = 0;
1544 1330
1545 list_for_each_entry (bridge, &bridge_list, list) { 1331 list_for_each_entry(bridge, &bridge_list, list) {
1546 dbg("Bus %04x:%02x has %d slot%s\n", 1332 dbg("Bus %04x:%02x has %d slot%s\n",
1547 pci_domain_nr(bridge->pci_bus), 1333 pci_domain_nr(bridge->pci_bus),
1548 bridge->pci_bus->number, bridge->nr_slots, 1334 bridge->pci_bus->number, bridge->nr_slots,
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index 83f337c891a9..c7084f0eca5a 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -890,7 +890,7 @@ static int poll_hpc(void *data)
890 msleep(POLL_INTERVAL_SEC * 1000); 890 msleep(POLL_INTERVAL_SEC * 1000);
891 891
892 if (kthread_should_stop()) 892 if (kthread_should_stop())
893 break; 893 goto out_sleep;
894 894
895 down (&semOperations); 895 down (&semOperations);
896 896
@@ -904,6 +904,7 @@ static int poll_hpc(void *data)
904 /* give up the hardware semaphore */ 904 /* give up the hardware semaphore */
905 up (&semOperations); 905 up (&semOperations);
906 /* sleep for a short time just for good measure */ 906 /* sleep for a short time just for good measure */
907out_sleep:
907 msleep(100); 908 msleep(100);
908 } 909 }
909 up (&sem_exit); 910 up (&sem_exit);
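The ibmphp change reroutes a stop request through the new out_sleep label, so the poller still performs its short settling sleep and leaves through the loop's normal exit instead of breaking out mid-iteration with the msleep(100) skipped. The general shape of such a polling kthread, simplified (poll_hardware_once() stands in for the slot-status work done under semOperations):

	while (!kthread_should_stop()) {
		msleep(POLL_INTERVAL_SEC * 1000);
		if (kthread_should_stop())
			goto out_sleep;		/* skip the work, keep the delay */
		down(&semOperations);
		poll_hardware_once();		/* hypothetical helper */
		up(&semOperations);
out_sleep:
		msleep(100);			/* short settle before re-checking */
	}
	up(&sem_exit);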
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 0325d989bb46..38183a534b65 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -68,26 +68,26 @@ static DEFINE_MUTEX(pci_hp_mutex);
68static char *pci_bus_speed_strings[] = { 68static char *pci_bus_speed_strings[] = {
69 "33 MHz PCI", /* 0x00 */ 69 "33 MHz PCI", /* 0x00 */
70 "66 MHz PCI", /* 0x01 */ 70 "66 MHz PCI", /* 0x01 */
71 "66 MHz PCIX", /* 0x02 */ 71 "66 MHz PCI-X", /* 0x02 */
72 "100 MHz PCIX", /* 0x03 */ 72 "100 MHz PCI-X", /* 0x03 */
73 "133 MHz PCIX", /* 0x04 */ 73 "133 MHz PCI-X", /* 0x04 */
74 NULL, /* 0x05 */ 74 NULL, /* 0x05 */
75 NULL, /* 0x06 */ 75 NULL, /* 0x06 */
76 NULL, /* 0x07 */ 76 NULL, /* 0x07 */
77 NULL, /* 0x08 */ 77 NULL, /* 0x08 */
78 "66 MHz PCIX 266", /* 0x09 */ 78 "66 MHz PCI-X 266", /* 0x09 */
79 "100 MHz PCIX 266", /* 0x0a */ 79 "100 MHz PCI-X 266", /* 0x0a */
80 "133 MHz PCIX 266", /* 0x0b */ 80 "133 MHz PCI-X 266", /* 0x0b */
81 NULL, /* 0x0c */ 81 NULL, /* 0x0c */
82 NULL, /* 0x0d */ 82 NULL, /* 0x0d */
83 NULL, /* 0x0e */ 83 NULL, /* 0x0e */
84 NULL, /* 0x0f */ 84 NULL, /* 0x0f */
85 NULL, /* 0x10 */ 85 NULL, /* 0x10 */
86 "66 MHz PCIX 533", /* 0x11 */ 86 "66 MHz PCI-X 533", /* 0x11 */
87 "100 MHz PCIX 533", /* 0x12 */ 87 "100 MHz PCI-X 533", /* 0x12 */
88 "133 MHz PCIX 533", /* 0x13 */ 88 "133 MHz PCI-X 533", /* 0x13 */
89 "2.5 GT/s PCI-E", /* 0x14 */ 89 "2.5 GT/s PCIe", /* 0x14 */
90 "5.0 GT/s PCI-E", /* 0x15 */ 90 "5.0 GT/s PCIe", /* 0x15 */
91}; 91};
92 92
93#ifdef CONFIG_HOTPLUG_PCI_CPCI 93#ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 3070f77eb56a..4ed76b47b6dc 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -91,7 +91,6 @@ struct controller {
91 struct slot *slot; 91 struct slot *slot;
92 wait_queue_head_t queue; /* sleep & wake process */ 92 wait_queue_head_t queue; /* sleep & wake process */
93 u32 slot_cap; 93 u32 slot_cap;
94 u8 cap_base;
95 struct timer_list poll_timer; 94 struct timer_list poll_timer;
96 unsigned int cmd_busy:1; 95 unsigned int cmd_busy:1;
97 unsigned int no_cmd_complete:1; 96 unsigned int no_cmd_complete:1;
diff --git a/drivers/pci/hotplug/pciehp_acpi.c b/drivers/pci/hotplug/pciehp_acpi.c
index 37c8d3d0323e..b09b083011d6 100644
--- a/drivers/pci/hotplug/pciehp_acpi.c
+++ b/drivers/pci/hotplug/pciehp_acpi.c
@@ -87,7 +87,8 @@ static int __init dummy_probe(struct pcie_device *dev)
87 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */ 87 /* Note: pciehp_detect_mode != PCIEHP_DETECT_ACPI here */
88 if (pciehp_get_hp_hw_control_from_firmware(pdev)) 88 if (pciehp_get_hp_hw_control_from_firmware(pdev))
89 return -ENODEV; 89 return -ENODEV;
90 if (!(pos = pci_find_capability(pdev, PCI_CAP_ID_EXP))) 90 pos = pci_pcie_cap(pdev);
91 if (!pos)
91 return -ENODEV; 92 return -ENODEV;
92 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap); 93 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);
93 slot = kzalloc(sizeof(*slot), GFP_KERNEL); 94 slot = kzalloc(sizeof(*slot), GFP_KERNEL);
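pci_pcie_cap() returns the PCI Express capability offset the core cached at enumeration time, so it replaces pci_find_capability(pdev, PCI_CAP_ID_EXP) without re-walking the capability list in config space; like the old call, it yields 0 for a device with no PCIe capability. Typical use, as in the hunk above:

	u32 slot_cap;
	int pos = pci_pcie_cap(pdev);	/* cached offset, 0 if not PCIe */

	if (!pos)
		return -ENODEV;
	pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &slot_cap);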
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index bc234719b1df..5674b2075bdc 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -72,18 +72,6 @@ static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
72static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 72static int get_max_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value); 73static int get_cur_bus_speed (struct hotplug_slot *slot, enum pci_bus_speed *value);
74 74
75static struct hotplug_slot_ops pciehp_hotplug_slot_ops = {
76 .set_attention_status = set_attention_status,
77 .enable_slot = enable_slot,
78 .disable_slot = disable_slot,
79 .get_power_status = get_power_status,
80 .get_attention_status = get_attention_status,
81 .get_latch_status = get_latch_status,
82 .get_adapter_status = get_adapter_status,
83 .get_max_bus_speed = get_max_bus_speed,
84 .get_cur_bus_speed = get_cur_bus_speed,
85};
86
87/** 75/**
88 * release_slot - free up the memory used by a slot 76 * release_slot - free up the memory used by a slot
89 * @hotplug_slot: slot to free 77 * @hotplug_slot: slot to free
@@ -95,6 +83,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
95 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 83 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
96 __func__, hotplug_slot_name(hotplug_slot)); 84 __func__, hotplug_slot_name(hotplug_slot));
97 85
86 kfree(hotplug_slot->ops);
98 kfree(hotplug_slot->info); 87 kfree(hotplug_slot->info);
99 kfree(hotplug_slot); 88 kfree(hotplug_slot);
100} 89}
@@ -104,6 +93,7 @@ static int init_slot(struct controller *ctrl)
104 struct slot *slot = ctrl->slot; 93 struct slot *slot = ctrl->slot;
105 struct hotplug_slot *hotplug = NULL; 94 struct hotplug_slot *hotplug = NULL;
106 struct hotplug_slot_info *info = NULL; 95 struct hotplug_slot_info *info = NULL;
96 struct hotplug_slot_ops *ops = NULL;
107 char name[SLOT_NAME_SIZE]; 97 char name[SLOT_NAME_SIZE];
108 int retval = -ENOMEM; 98 int retval = -ENOMEM;
109 99
@@ -115,11 +105,28 @@ static int init_slot(struct controller *ctrl)
115 if (!info) 105 if (!info)
116 goto out; 106 goto out;
117 107
108 /* Setup hotplug slot ops */
109 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
110 if (!ops)
111 goto out;
112 ops->enable_slot = enable_slot;
113 ops->disable_slot = disable_slot;
114 ops->get_power_status = get_power_status;
115 ops->get_adapter_status = get_adapter_status;
116 ops->get_max_bus_speed = get_max_bus_speed;
117 ops->get_cur_bus_speed = get_cur_bus_speed;
118 if (MRL_SENS(ctrl))
119 ops->get_latch_status = get_latch_status;
120 if (ATTN_LED(ctrl)) {
121 ops->get_attention_status = get_attention_status;
122 ops->set_attention_status = set_attention_status;
123 }
124
118 /* register this slot with the hotplug pci core */ 125 /* register this slot with the hotplug pci core */
119 hotplug->info = info; 126 hotplug->info = info;
120 hotplug->private = slot; 127 hotplug->private = slot;
121 hotplug->release = &release_slot; 128 hotplug->release = &release_slot;
122 hotplug->ops = &pciehp_hotplug_slot_ops; 129 hotplug->ops = ops;
123 slot->hotplug_slot = hotplug; 130 slot->hotplug_slot = hotplug;
124 snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); 131 snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
125 132
@@ -128,17 +135,12 @@ static int init_slot(struct controller *ctrl)
128 ctrl->pcie->port->subordinate->number, PSN(ctrl)); 135 ctrl->pcie->port->subordinate->number, PSN(ctrl));
129 retval = pci_hp_register(hotplug, 136 retval = pci_hp_register(hotplug,
130 ctrl->pcie->port->subordinate, 0, name); 137 ctrl->pcie->port->subordinate, 0, name);
131 if (retval) { 138 if (retval)
132 ctrl_err(ctrl, 139 ctrl_err(ctrl,
133 "pci_hp_register failed with error %d\n", retval); 140 "pci_hp_register failed with error %d\n", retval);
134 goto out;
135 }
136 get_power_status(hotplug, &info->power_status);
137 get_attention_status(hotplug, &info->attention_status);
138 get_latch_status(hotplug, &info->latch_status);
139 get_adapter_status(hotplug, &info->adapter_status);
140out: 141out:
141 if (retval) { 142 if (retval) {
143 kfree(ops);
142 kfree(info); 144 kfree(info);
143 kfree(hotplug); 145 kfree(hotplug);
144 } 146 }
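Replacing the file-scope pciehp_hotplug_slot_ops with a kzalloc'ed per-controller copy lets init_slot() leave callbacks NULL when the hardware lacks the feature: no latch callback without an MRL sensor, no attention callbacks without an attention LED. The hotplug core creates a slot's sysfs attributes only when the matching callbacks are present, so slots without the hardware stop exposing bogus files; note the allocation is paired with the new kfree(hotplug_slot->ops) in release_slot() and the kfree(ops) on the error path. Roughly the kind of check this enables in the core (illustrative, not the core's exact code):

	if (slot->ops->get_attention_status || slot->ops->set_attention_status)
		create_attention_sysfs_file(slot);	/* hypothetical helper */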
@@ -160,12 +162,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
160 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 162 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
161 __func__, slot_name(slot)); 163 __func__, slot_name(slot));
162 164
163 hotplug_slot->info->attention_status = status; 165 return pciehp_set_attention_status(slot, status);
164
165 if (ATTN_LED(slot->ctrl))
166 pciehp_set_attention_status(slot, status);
167
168 return 0;
169} 166}
170 167
171 168
@@ -193,92 +190,62 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
193static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) 190static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
194{ 191{
195 struct slot *slot = hotplug_slot->private; 192 struct slot *slot = hotplug_slot->private;
196 int retval;
197 193
198 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 194 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
199 __func__, slot_name(slot)); 195 __func__, slot_name(slot));
200 196
201 retval = pciehp_get_power_status(slot, value); 197 return pciehp_get_power_status(slot, value);
202 if (retval < 0)
203 *value = hotplug_slot->info->power_status;
204
205 return 0;
206} 198}
207 199
208static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) 200static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
209{ 201{
210 struct slot *slot = hotplug_slot->private; 202 struct slot *slot = hotplug_slot->private;
211 int retval;
212 203
213 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 204 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
214 __func__, slot_name(slot)); 205 __func__, slot_name(slot));
215 206
216 retval = pciehp_get_attention_status(slot, value); 207 return pciehp_get_attention_status(slot, value);
217 if (retval < 0)
218 *value = hotplug_slot->info->attention_status;
219
220 return 0;
221} 208}
222 209
223static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) 210static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
224{ 211{
225 struct slot *slot = hotplug_slot->private; 212 struct slot *slot = hotplug_slot->private;
226 int retval;
227 213
228 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 214 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
229 __func__, slot_name(slot)); 215 __func__, slot_name(slot));
230 216
231 retval = pciehp_get_latch_status(slot, value); 217 return pciehp_get_latch_status(slot, value);
232 if (retval < 0)
233 *value = hotplug_slot->info->latch_status;
234
235 return 0;
236} 218}
237 219
238static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) 220static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
239{ 221{
240 struct slot *slot = hotplug_slot->private; 222 struct slot *slot = hotplug_slot->private;
241 int retval;
242 223
243 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 224 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
244 __func__, slot_name(slot)); 225 __func__, slot_name(slot));
245 226
246 retval = pciehp_get_adapter_status(slot, value); 227 return pciehp_get_adapter_status(slot, value);
247 if (retval < 0)
248 *value = hotplug_slot->info->adapter_status;
249
250 return 0;
251} 228}
252 229
253static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, 230static int get_max_bus_speed(struct hotplug_slot *hotplug_slot,
254 enum pci_bus_speed *value) 231 enum pci_bus_speed *value)
255{ 232{
256 struct slot *slot = hotplug_slot->private; 233 struct slot *slot = hotplug_slot->private;
257 int retval;
258 234
259 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 235 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
260 __func__, slot_name(slot)); 236 __func__, slot_name(slot));
261 237
262 retval = pciehp_get_max_link_speed(slot, value); 238 return pciehp_get_max_link_speed(slot, value);
263 if (retval < 0)
264 *value = PCI_SPEED_UNKNOWN;
265
266 return 0;
267} 239}
268 240
269static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value) 241static int get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_speed *value)
270{ 242{
271 struct slot *slot = hotplug_slot->private; 243 struct slot *slot = hotplug_slot->private;
272 int retval;
273 244
274 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", 245 ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
275 __func__, slot_name(slot)); 246 __func__, slot_name(slot));
276 247
277 retval = pciehp_get_cur_link_speed(slot, value); 248 return pciehp_get_cur_link_speed(slot, value);
278 if (retval < 0)
279 *value = PCI_SPEED_UNKNOWN;
280
281 return 0;
282} 249}
283 250
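With the cached hotplug_slot_info fallback gone, each wrapper is now a one-line trampoline that propagates the hardware accessor's return value, so callers see a real error code instead of a silently substituted stale value. Illustrative caller-side effect (sketch):

	u8 power;
	int err = get_power_status(hotplug_slot, &power);
	if (err)	/* previously masked: *value came from the cache and 0 was returned */
		return err;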
284static int pciehp_probe(struct pcie_device *dev) 251static int pciehp_probe(struct pcie_device *dev)
@@ -286,14 +253,13 @@ static int pciehp_probe(struct pcie_device *dev)
286 int rc; 253 int rc;
287 struct controller *ctrl; 254 struct controller *ctrl;
288 struct slot *slot; 255 struct slot *slot;
289 u8 value; 256 u8 occupied, poweron;
290 struct pci_dev *pdev = dev->port;
291 257
292 if (pciehp_force) 258 if (pciehp_force)
293 dev_info(&dev->device, 259 dev_info(&dev->device,
294 "Bypassing BIOS check for pciehp use on %s\n", 260 "Bypassing BIOS check for pciehp use on %s\n",
295 pci_name(pdev)); 261 pci_name(dev->port));
296 else if (pciehp_get_hp_hw_control_from_firmware(pdev)) 262 else if (pciehp_get_hp_hw_control_from_firmware(dev->port))
297 goto err_out_none; 263 goto err_out_none;
298 264
299 ctrl = pcie_init(dev); 265 ctrl = pcie_init(dev);
@@ -318,23 +284,18 @@ static int pciehp_probe(struct pcie_device *dev)
318 rc = pcie_init_notification(ctrl); 284 rc = pcie_init_notification(ctrl);
319 if (rc) { 285 if (rc) {
320 ctrl_err(ctrl, "Notification initialization failed\n"); 286 ctrl_err(ctrl, "Notification initialization failed\n");
321 goto err_out_release_ctlr; 287 goto err_out_free_ctrl_slot;
322 } 288 }
323 289
324 /* Check if slot is occupied */ 290 /* Check if slot is occupied */
325 slot = ctrl->slot; 291 slot = ctrl->slot;
326 pciehp_get_adapter_status(slot, &value); 292 pciehp_get_adapter_status(slot, &occupied);
327 if (value) { 293 pciehp_get_power_status(slot, &poweron);
328 if (pciehp_force) 294 if (occupied && pciehp_force)
329 pciehp_enable_slot(slot); 295 pciehp_enable_slot(slot);
330 } else { 296 /* If empty slot's power status is on, turn power off */
331 /* Power off slot if not occupied */ 297 if (!occupied && poweron && POWER_CTRL(ctrl))
332 if (POWER_CTRL(ctrl)) { 298 pciehp_power_off_slot(slot);
333 rc = pciehp_power_off_slot(slot);
334 if (rc)
335 goto err_out_free_ctrl_slot;
336 }
337 }
338 299
339 return 0; 300 return 0;
340 301
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 84487d126e4d..d6ac1b261dd9 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -142,23 +142,9 @@ u8 pciehp_handle_power_fault(struct slot *p_slot)
142 142
143 /* power fault */ 143 /* power fault */
144 ctrl_dbg(ctrl, "Power fault interrupt received\n"); 144 ctrl_dbg(ctrl, "Power fault interrupt received\n");
145 145 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
146 if (!pciehp_query_power_fault(p_slot)) { 146 event_type = INT_POWER_FAULT;
147 /* 147 ctrl_info(ctrl, "Power fault bit %x set\n", 0);
148 * power fault Cleared
149 */
150 ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n",
151 slot_name(p_slot));
152 event_type = INT_POWER_FAULT_CLEAR;
153 } else {
154 /*
155 * power fault
156 */
157 ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot));
158 event_type = INT_POWER_FAULT;
159 ctrl_info(ctrl, "Power fault bit %x set\n", 0);
160 }
161
162 queue_interrupt_event(p_slot, event_type); 148 queue_interrupt_event(p_slot, event_type);
163 149
164 return 1; 150 return 1;
@@ -224,13 +210,12 @@ static int board_added(struct slot *p_slot)
224 retval = pciehp_check_link_status(ctrl); 210 retval = pciehp_check_link_status(ctrl);
225 if (retval) { 211 if (retval) {
226 ctrl_err(ctrl, "Failed to check link status\n"); 212 ctrl_err(ctrl, "Failed to check link status\n");
227 set_slot_off(ctrl, p_slot); 213 goto err_exit;
228 return retval;
229 } 214 }
230 215
231 /* Check for a power fault */ 216 /* Check for a power fault */
232 if (pciehp_query_power_fault(p_slot)) { 217 if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
233 ctrl_dbg(ctrl, "Power fault detected\n"); 218 ctrl_err(ctrl, "Power fault on slot %s\n", slot_name(p_slot));
234 retval = -EIO; 219 retval = -EIO;
235 goto err_exit; 220 goto err_exit;
236 } 221 }
@@ -363,25 +348,6 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
363 mutex_unlock(&p_slot->lock); 348 mutex_unlock(&p_slot->lock);
364} 349}
365 350
366static int update_slot_info(struct slot *slot)
367{
368 struct hotplug_slot_info *info;
369 int result;
370
371 info = kmalloc(sizeof(*info), GFP_KERNEL);
372 if (!info)
373 return -ENOMEM;
374
375 pciehp_get_power_status(slot, &info->power_status);
376 pciehp_get_attention_status(slot, &info->attention_status);
377 pciehp_get_latch_status(slot, &info->latch_status);
378 pciehp_get_adapter_status(slot, &info->adapter_status);
379
380 result = pci_hp_change_slot_info(slot->hotplug_slot, info);
381 kfree (info);
382 return result;
383}
384
385/* 351/*
386 * Note: This function must be called with slot->lock held 352 * Note: This function must be called with slot->lock held
387 */ 353 */
@@ -442,7 +408,6 @@ static void handle_button_press_event(struct slot *p_slot)
442 * to hot-add or hot-remove is still in progress 408 * to hot-add or hot-remove is still in progress
443 */ 409 */
444 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); 410 ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot));
445 update_slot_info(p_slot);
446 break; 411 break;
447 default: 412 default:
448 ctrl_warn(ctrl, "Not a valid state\n"); 413 ctrl_warn(ctrl, "Not a valid state\n");
@@ -500,11 +465,9 @@ static void interrupt_event_handler(struct work_struct *work)
500 if (!HP_SUPR_RM(ctrl)) 465 if (!HP_SUPR_RM(ctrl))
501 break; 466 break;
502 ctrl_dbg(ctrl, "Surprise Removal\n"); 467 ctrl_dbg(ctrl, "Surprise Removal\n");
503 update_slot_info(p_slot);
504 handle_surprise_event(p_slot); 468 handle_surprise_event(p_slot);
505 break; 469 break;
506 default: 470 default:
507 update_slot_info(p_slot);
508 break; 471 break;
509 } 472 }
510 mutex_unlock(&p_slot->lock); 473 mutex_unlock(&p_slot->lock);
@@ -547,9 +510,6 @@ int pciehp_enable_slot(struct slot *p_slot)
547 if (rc) { 510 if (rc) {
548 pciehp_get_latch_status(p_slot, &getstatus); 511 pciehp_get_latch_status(p_slot, &getstatus);
549 } 512 }
550
551 update_slot_info(p_slot);
552
553 return rc; 513 return rc;
554} 514}
555 515
@@ -590,10 +550,7 @@ int pciehp_disable_slot(struct slot *p_slot)
590 } 550 }
591 } 551 }
592 552
593 ret = remove_board(p_slot); 553 return remove_board(p_slot);
594 update_slot_info(p_slot);
595
596 return ret;
597} 554}
598 555
599int pciehp_sysfs_enable_slot(struct slot *p_slot) 556int pciehp_sysfs_enable_slot(struct slot *p_slot)
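Two themes run through this file. First, pciehp_handle_power_fault() no longer distinguishes a "power fault cleared" event; the handler now unconditionally queues INT_POWER_FAULT, and board_added() additionally consults ctrl->power_fault_detected so a fault latched by the interrupt handler before bring-up is not missed. Second, update_slot_info() and all of its call sites disappear because the get_* wrappers above read the hardware directly, leaving no cached info to refresh. The latch's lifecycle, as far as it is visible in this patch:

	/* ISR (pciehp_hpc.c):  ctrl->power_fault_detected = 1 on a PFD event
	 * power-on path:       ctrl->power_fault_detected = 0 (pciehp_power_on_slot)
	 * board_added():       checks the latch OR the live status bit
	 */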
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9ef4605c1ef6..10040d58c8ef 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -45,25 +45,25 @@ static atomic_t pciehp_num_controllers = ATOMIC_INIT(0);
45static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value) 45static inline int pciehp_readw(struct controller *ctrl, int reg, u16 *value)
46{ 46{
47 struct pci_dev *dev = ctrl->pcie->port; 47 struct pci_dev *dev = ctrl->pcie->port;
48 return pci_read_config_word(dev, ctrl->cap_base + reg, value); 48 return pci_read_config_word(dev, pci_pcie_cap(dev) + reg, value);
49} 49}
50 50
51static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value) 51static inline int pciehp_readl(struct controller *ctrl, int reg, u32 *value)
52{ 52{
53 struct pci_dev *dev = ctrl->pcie->port; 53 struct pci_dev *dev = ctrl->pcie->port;
54 return pci_read_config_dword(dev, ctrl->cap_base + reg, value); 54 return pci_read_config_dword(dev, pci_pcie_cap(dev) + reg, value);
55} 55}
56 56
57static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value) 57static inline int pciehp_writew(struct controller *ctrl, int reg, u16 value)
58{ 58{
59 struct pci_dev *dev = ctrl->pcie->port; 59 struct pci_dev *dev = ctrl->pcie->port;
60 return pci_write_config_word(dev, ctrl->cap_base + reg, value); 60 return pci_write_config_word(dev, pci_pcie_cap(dev) + reg, value);
61} 61}
62 62
63static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) 63static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value)
64{ 64{
65 struct pci_dev *dev = ctrl->pcie->port; 65 struct pci_dev *dev = ctrl->pcie->port;
66 return pci_write_config_dword(dev, ctrl->cap_base + reg, value); 66 return pci_write_config_dword(dev, pci_pcie_cap(dev) + reg, value);
67} 67}
68 68
69/* Power Control Command */ 69/* Power Control Command */
@@ -318,8 +318,8 @@ int pciehp_get_attention_status(struct slot *slot, u8 *status)
318 return retval; 318 return retval;
319 } 319 }
320 320
321 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", 321 ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", __func__,
322 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); 322 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
323 323
324 atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6; 324 atten_led_state = (slot_ctrl & PCI_EXP_SLTCTL_AIC) >> 6;
325 325
@@ -356,8 +356,8 @@ int pciehp_get_power_status(struct slot *slot, u8 *status)
356 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); 356 ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__);
357 return retval; 357 return retval;
358 } 358 }
359 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", 359 ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", __func__,
360 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_ctrl); 360 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_ctrl);
361 361
362 pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10; 362 pwr_state = (slot_ctrl & PCI_EXP_SLTCTL_PCC) >> 10;
363 363
@@ -427,27 +427,24 @@ int pciehp_set_attention_status(struct slot *slot, u8 value)
427 struct controller *ctrl = slot->ctrl; 427 struct controller *ctrl = slot->ctrl;
428 u16 slot_cmd; 428 u16 slot_cmd;
429 u16 cmd_mask; 429 u16 cmd_mask;
430 int rc;
431 430
432 cmd_mask = PCI_EXP_SLTCTL_AIC; 431 cmd_mask = PCI_EXP_SLTCTL_AIC;
433 switch (value) { 432 switch (value) {
434 case 0 : /* turn off */ 433 case 0 : /* turn off */
435 slot_cmd = 0x00C0; 434 slot_cmd = 0x00C0;
436 break; 435 break;
437 case 1: /* turn on */ 436 case 1: /* turn on */
438 slot_cmd = 0x0040; 437 slot_cmd = 0x0040;
439 break; 438 break;
440 case 2: /* turn blink */ 439 case 2: /* turn blink */
441 slot_cmd = 0x0080; 440 slot_cmd = 0x0080;
442 break; 441 break;
443 default: 442 default:
444 return -1; 443 return -EINVAL;
445 } 444 }
446 rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 445 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
447 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 446 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
448 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 447 return pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
449
450 return rc;
451} 448}
452 449
453void pciehp_green_led_on(struct slot *slot) 450void pciehp_green_led_on(struct slot *slot)
@@ -459,8 +456,8 @@ void pciehp_green_led_on(struct slot *slot)
459 slot_cmd = 0x0100; 456 slot_cmd = 0x0100;
460 cmd_mask = PCI_EXP_SLTCTL_PIC; 457 cmd_mask = PCI_EXP_SLTCTL_PIC;
461 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 458 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
462 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 459 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
463 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 460 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
464} 461}
465 462
466void pciehp_green_led_off(struct slot *slot) 463void pciehp_green_led_off(struct slot *slot)
@@ -472,8 +469,8 @@ void pciehp_green_led_off(struct slot *slot)
472 slot_cmd = 0x0300; 469 slot_cmd = 0x0300;
473 cmd_mask = PCI_EXP_SLTCTL_PIC; 470 cmd_mask = PCI_EXP_SLTCTL_PIC;
474 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 471 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
475 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 472 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
476 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 473 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
477} 474}
478 475
479void pciehp_green_led_blink(struct slot *slot) 476void pciehp_green_led_blink(struct slot *slot)
@@ -485,8 +482,8 @@ void pciehp_green_led_blink(struct slot *slot)
485 slot_cmd = 0x0200; 482 slot_cmd = 0x0200;
486 cmd_mask = PCI_EXP_SLTCTL_PIC; 483 cmd_mask = PCI_EXP_SLTCTL_PIC;
487 pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 484 pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
488 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 485 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
489 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 486 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
490} 487}
491 488
492int pciehp_power_on_slot(struct slot * slot) 489int pciehp_power_on_slot(struct slot * slot)
@@ -514,97 +511,38 @@ int pciehp_power_on_slot(struct slot * slot)
514 return retval; 511 return retval;
515 } 512 }
516 } 513 }
514 ctrl->power_fault_detected = 0;
517 515
518 slot_cmd = POWER_ON; 516 slot_cmd = POWER_ON;
519 cmd_mask = PCI_EXP_SLTCTL_PCC; 517 cmd_mask = PCI_EXP_SLTCTL_PCC;
520 if (!pciehp_poll_mode) {
521 /* Enable power fault detection turned off at power off time */
522 slot_cmd |= PCI_EXP_SLTCTL_PFDE;
523 cmd_mask |= PCI_EXP_SLTCTL_PFDE;
524 }
525
526 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 518 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
527 if (retval) { 519 if (retval) {
528 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); 520 ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd);
529 return retval; 521 return retval;
530 } 522 }
531 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 523 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
532 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 524 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
533 525
534 ctrl->power_fault_detected = 0;
535 return retval; 526 return retval;
536} 527}
537 528
538static inline int pcie_mask_bad_dllp(struct controller *ctrl)
539{
540 struct pci_dev *dev = ctrl->pcie->port;
541 int pos;
542 u32 reg;
543
544 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
545 if (!pos)
546 return 0;
547 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg);
548 if (reg & PCI_ERR_COR_BAD_DLLP)
549 return 0;
550 reg |= PCI_ERR_COR_BAD_DLLP;
551 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
552 return 1;
553}
554
555static inline void pcie_unmask_bad_dllp(struct controller *ctrl)
556{
557 struct pci_dev *dev = ctrl->pcie->port;
558 u32 reg;
559 int pos;
560
561 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
562 if (!pos)
563 return;
564 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg);
565 if (!(reg & PCI_ERR_COR_BAD_DLLP))
566 return;
567 reg &= ~PCI_ERR_COR_BAD_DLLP;
568 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg);
569}
570
571int pciehp_power_off_slot(struct slot * slot) 529int pciehp_power_off_slot(struct slot * slot)
572{ 530{
573 struct controller *ctrl = slot->ctrl; 531 struct controller *ctrl = slot->ctrl;
574 u16 slot_cmd; 532 u16 slot_cmd;
575 u16 cmd_mask; 533 u16 cmd_mask;
576 int retval = 0; 534 int retval;
577 int changed;
578
579 /*
580 * Set Bad DLLP Mask bit in Correctable Error Mask
581 * Register. This is the workaround against Bad DLLP error
582 * that sometimes happens during turning power off the slot
583 * which conforms to PCI Express 1.0a spec.
584 */
585 changed = pcie_mask_bad_dllp(ctrl);
586 535
587 slot_cmd = POWER_OFF; 536 slot_cmd = POWER_OFF;
588 cmd_mask = PCI_EXP_SLTCTL_PCC; 537 cmd_mask = PCI_EXP_SLTCTL_PCC;
589 if (!pciehp_poll_mode) {
590 /* Disable power fault detection */
591 slot_cmd &= ~PCI_EXP_SLTCTL_PFDE;
592 cmd_mask |= PCI_EXP_SLTCTL_PFDE;
593 }
594
595 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); 538 retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
596 if (retval) { 539 if (retval) {
597 ctrl_err(ctrl, "Write command failed!\n"); 540 ctrl_err(ctrl, "Write command failed!\n");
598 retval = -1; 541 return retval;
599 goto out;
600 } 542 }
601 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", 543 ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
602 __func__, ctrl->cap_base + PCI_EXP_SLTCTL, slot_cmd); 544 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
603 out: 545 return 0;
604 if (changed)
605 pcie_unmask_bad_dllp(ctrl);
606
607 return retval;
608} 546}
609 547
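pciehp_power_on_slot() now clears the power_fault_detected latch before issuing POWER_ON rather than after the command write, and both power paths stop toggling PCI_EXP_SLTCTL_PFDE, since power-fault notification is never enabled anymore (see pcie_enable_notification() below). The bad-DLLP masking workaround around the power-off write is dropped in the same cleanup, so the remaining body of pciehp_power_off_slot() is essentially just:

	slot_cmd = POWER_OFF;
	cmd_mask = PCI_EXP_SLTCTL_PCC;
	retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);	/* report any error */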
610static irqreturn_t pcie_isr(int irq, void *dev_id) 548static irqreturn_t pcie_isr(int irq, void *dev_id)
@@ -840,11 +778,19 @@ int pcie_enable_notification(struct controller *ctrl)
840{ 778{
841 u16 cmd, mask; 779 u16 cmd, mask;
842 780
781 /*
782 * TBD: Power fault detected software notification support.
783 *
 784 * Power fault detected software notification is not enabled
 785 * for now, because it caused a power fault detected interrupt
 786 * storm on some machines. On those machines, the power fault
 787 * detected bit in the slot status register was set again
 788 * immediately after being cleared in the interrupt service
 789 * routine, so the next power fault detected interrupt fired again.
790 */
843 cmd = PCI_EXP_SLTCTL_PDCE; 791 cmd = PCI_EXP_SLTCTL_PDCE;
844 if (ATTN_BUTTN(ctrl)) 792 if (ATTN_BUTTN(ctrl))
845 cmd |= PCI_EXP_SLTCTL_ABPE; 793 cmd |= PCI_EXP_SLTCTL_ABPE;
846 if (POWER_CTRL(ctrl))
847 cmd |= PCI_EXP_SLTCTL_PFDE;
848 if (MRL_SENS(ctrl)) 794 if (MRL_SENS(ctrl))
849 cmd |= PCI_EXP_SLTCTL_MRLSCE; 795 cmd |= PCI_EXP_SLTCTL_MRLSCE;
850 if (!pciehp_poll_mode) 796 if (!pciehp_poll_mode)
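After this change the notification enables written to Slot Control are, in summary (from the code above and the unchanged tail of the function):

	/* PDCE       - presence detect changed           (always)
	 * ABPE       - attention button pressed          (if ATTN_BUTTN)
	 * MRLSCE     - MRL sensor changed                (if MRL_SENS)
	 * HPIE, CCIE - hot-plug interrupt / cmd complete (if !pciehp_poll_mode)
	 * PFDE       - power fault detected              (deliberately omitted, see TBD)
	 */

The matching disable path now also clears PCI_EXP_SLTCTL_DLLSCE, so a data-link-layer state-changed enable left over from firmware is switched off as well.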
@@ -866,7 +812,8 @@ static void pcie_disable_notification(struct controller *ctrl)
866 u16 mask; 812 u16 mask;
867 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE | 813 mask = (PCI_EXP_SLTCTL_PDCE | PCI_EXP_SLTCTL_ABPE |
868 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE | 814 PCI_EXP_SLTCTL_MRLSCE | PCI_EXP_SLTCTL_PFDE |
869 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE); 815 PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
816 PCI_EXP_SLTCTL_DLLSCE);
870 if (pcie_write_cmd(ctrl, 0, mask)) 817 if (pcie_write_cmd(ctrl, 0, mask))
871 ctrl_warn(ctrl, "Cannot disable software notification\n"); 818 ctrl_warn(ctrl, "Cannot disable software notification\n");
872} 819}
@@ -934,7 +881,8 @@ static inline void dbg_ctrl(struct controller *ctrl)
934 pdev->subsystem_device); 881 pdev->subsystem_device);
935 ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", 882 ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n",
936 pdev->subsystem_vendor); 883 pdev->subsystem_vendor);
937 ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base); 884 ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n",
885 pci_pcie_cap(pdev));
938 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 886 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
939 if (!pci_resource_len(pdev, i)) 887 if (!pci_resource_len(pdev, i))
940 continue; 888 continue;
@@ -978,8 +926,7 @@ struct controller *pcie_init(struct pcie_device *dev)
978 goto abort; 926 goto abort;
979 } 927 }
980 ctrl->pcie = dev; 928 ctrl->pcie = dev;
981 ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); 929 if (!pci_pcie_cap(pdev)) {
982 if (!ctrl->cap_base) {
983 ctrl_err(ctrl, "Cannot find PCI Express capability\n"); 930 ctrl_err(ctrl, "Cannot find PCI Express capability\n");
984 goto abort_ctrl; 931 goto abort_ctrl;
985 } 932 }
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index cc8ec3aa41a7..80b461c98557 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -43,7 +43,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
43 * Perhaps we *should* use default settings for PCIe, but 43 * Perhaps we *should* use default settings for PCIe, but
44 * pciehp didn't, so we won't either. 44 * pciehp didn't, so we won't either.
45 */ 45 */
46 if (dev->is_pcie) 46 if (pci_is_pcie(dev))
47 return; 47 return;
48 dev_info(&dev->dev, "using default PCI settings\n"); 48 dev_info(&dev->dev, "using default PCI settings\n");
49 hpp = &pci_default_type0; 49 hpp = &pci_default_type0;
@@ -102,7 +102,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
102 return; 102 return;
103 103
104 /* Find PCI Express capability */ 104 /* Find PCI Express capability */
105 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 105 pos = pci_pcie_cap(dev);
106 if (!pos) 106 if (!pos)
107 return; 107 return;
108 108
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 9261327b49f3..8d6159426311 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -1611,7 +1611,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
1611 return ret; 1611 return ret;
1612 parent = parent->bus->self; 1612 parent = parent->bus->self;
1613 } 1613 }
1614 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ 1614 if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
1615 return domain_context_mapping_one(domain, 1615 return domain_context_mapping_one(domain,
1616 pci_domain_nr(tmp->subordinate), 1616 pci_domain_nr(tmp->subordinate),
1617 tmp->subordinate->number, 0, 1617 tmp->subordinate->number, 0,
@@ -1651,7 +1651,7 @@ static int domain_context_mapped(struct pci_dev *pdev)
1651 return ret; 1651 return ret;
1652 parent = parent->bus->self; 1652 parent = parent->bus->self;
1653 } 1653 }
1654 if (tmp->is_pcie) 1654 if (pci_is_pcie(tmp))
1655 return device_context_mapped(iommu, tmp->subordinate->number, 1655 return device_context_mapped(iommu, tmp->subordinate->number,
1656 0); 1656 0);
1657 else 1657 else
@@ -1821,7 +1821,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
1821 1821
1822 dev_tmp = pci_find_upstream_pcie_bridge(pdev); 1822 dev_tmp = pci_find_upstream_pcie_bridge(pdev);
1823 if (dev_tmp) { 1823 if (dev_tmp) {
1824 if (dev_tmp->is_pcie) { 1824 if (pci_is_pcie(dev_tmp)) {
1825 bus = dev_tmp->subordinate->number; 1825 bus = dev_tmp->subordinate->number;
1826 devfn = 0; 1826 devfn = 0;
1827 } else { 1827 } else {
@@ -2182,7 +2182,7 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
2182 * the 1:1 domain, just in _case_ one of their siblings turns out 2182 * the 1:1 domain, just in _case_ one of their siblings turns out
2183 * not to be able to map all of memory. 2183 * not to be able to map all of memory.
2184 */ 2184 */
2185 if (!pdev->is_pcie) { 2185 if (!pci_is_pcie(pdev)) {
2186 if (!pci_is_root_bus(pdev->bus)) 2186 if (!pci_is_root_bus(pdev->bus))
2187 return 0; 2187 return 0;
2188 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI) 2188 if (pdev->class >> 8 == PCI_CLASS_BRIDGE_PCI)
@@ -3319,7 +3319,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
3319 parent->devfn); 3319 parent->devfn);
3320 parent = parent->bus->self; 3320 parent = parent->bus->self;
3321 } 3321 }
3322 if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */ 3322 if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
3323 iommu_detach_dev(iommu, 3323 iommu_detach_dev(iommu,
3324 tmp->subordinate->number, 0); 3324 tmp->subordinate->number, 0);
3325 else /* this is a legacy PCI bridge */ 3325 else /* this is a legacy PCI bridge */
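All of these hunks are mechanical: the dev->is_pcie flag is being phased out in favor of pci_is_pcie(), which in this series is a trivial inline over the cached capability offset, roughly:

	static inline bool pci_is_pcie(struct pci_dev *dev)
	{
		return !!pci_pcie_cap(dev);	/* non-zero offset == PCIe device */
	}

Call sites read the same but stop depending on the soon-to-be-removed flag field.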
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 3b3658669bee..1487bf2be863 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -520,7 +520,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
520 return -1; 520 return -1;
521 521
522 /* PCIe device or Root Complex integrated PCI device */ 522 /* PCIe device or Root Complex integrated PCI device */
523 if (dev->is_pcie || !dev->bus->parent) { 523 if (pci_is_pcie(dev) || !dev->bus->parent) {
524 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, 524 set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
525 (dev->bus->number << 8) | dev->devfn); 525 (dev->bus->number << 8) | dev->devfn);
526 return 0; 526 return 0;
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
528 528
529 bridge = pci_find_upstream_pcie_bridge(dev); 529 bridge = pci_find_upstream_pcie_bridge(dev);
530 if (bridge) { 530 if (bridge) {
531 if (bridge->is_pcie) /* this is a PCIE-to-PCI/PCIX bridge */ 531 if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */
532 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16, 532 set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
533 (bridge->bus->number << 8) | dev->bus->number); 533 (bridge->bus->number << 8) | dev->bus->number);
534 else /* this is a legacy PCI bridge */ 534 else /* this is a legacy PCI bridge */
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
new file mode 100644
index 000000000000..3e0d7b5dd1b9
--- /dev/null
+++ b/drivers/pci/ioapic.c
@@ -0,0 +1,127 @@
1/*
2 * IOAPIC/IOxAPIC/IOSAPIC driver
3 *
4 * Copyright (C) 2009 Fujitsu Limited.
5 * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12/*
13 * This driver manages PCI I/O APICs added by hotplug after boot. We try to
14 * claim all I/O APIC PCI devices, but those present at boot were registered
15 * when we parsed the ACPI MADT, so we'll fail when we try to re-register
16 * them.
17 */
18
19#include <linux/pci.h>
20#include <linux/acpi.h>
21#include <acpi/acpi_bus.h>
22
23struct ioapic {
24 acpi_handle handle;
25 u32 gsi_base;
26};
27
28static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
29{
30 acpi_handle handle;
31 acpi_status status;
32 unsigned long long gsb;
33 struct ioapic *ioapic;
34 u64 addr;
35 int ret;
36 char *type;
37
38 handle = DEVICE_ACPI_HANDLE(&dev->dev);
39 if (!handle)
40 return -EINVAL;
41
42 status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
43 if (ACPI_FAILURE(status))
44 return -EINVAL;
45
46 /*
47 * The previous code in acpiphp evaluated _MAT if _GSB failed, but
48 * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs.
49 */
50
51 ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
52 if (!ioapic)
53 return -ENOMEM;
54
55 ioapic->handle = handle;
56 ioapic->gsi_base = (u32) gsb;
57
58 if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC)
59 type = "IOAPIC";
60 else
61 type = "IOxAPIC";
62
63 ret = pci_enable_device(dev);
64 if (ret < 0)
65 goto exit_free;
66
67 pci_set_master(dev);
68
69 if (pci_request_region(dev, 0, type))
70 goto exit_disable;
71
72 addr = pci_resource_start(dev, 0);
73 if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base))
74 goto exit_release;
75
76 pci_set_drvdata(dev, ioapic);
77 dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr,
78 ioapic->gsi_base);
79 return 0;
80
81exit_release:
82 pci_release_region(dev, 0);
83exit_disable:
84 pci_disable_device(dev);
85exit_free:
86 kfree(ioapic);
87 return -ENODEV;
88}
89
90static void ioapic_remove(struct pci_dev *dev)
91{
92 struct ioapic *ioapic = pci_get_drvdata(dev);
93
94 acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base);
95 pci_release_region(dev, 0);
96 pci_disable_device(dev);
97 kfree(ioapic);
98}
99
100
101static struct pci_device_id ioapic_devices[] = {
102 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
103 PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
104 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
105 PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
106 { }
107};
108
109static struct pci_driver ioapic_driver = {
110 .name = "ioapic",
111 .id_table = ioapic_devices,
112 .probe = ioapic_probe,
113 .remove = __devexit_p(ioapic_remove),
114};
115
116static int __init ioapic_init(void)
117{
118 return pci_register_driver(&ioapic_driver);
119}
120
121static void __exit ioapic_exit(void)
122{
123 pci_unregister_driver(&ioapic_driver);
124}
125
126module_init(ioapic_init);
127module_exit(ioapic_exit);
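This new driver replaces the acpiphp I/O APIC code removed earlier in the patch: when a hot-added PCI device matches the IOAPIC/IOxAPIC class entries, probe takes the device's ACPI companion handle, evaluates _GSB for the GSI base, and registers the I/O APIC; remove() releases everything in reverse order, the usual mirror of probe. Claiming every device of this class is harmless because boot-time I/O APICs simply fail re-registration, as the top-of-file comment notes. The ACPI lookup at the heart of probe, in isolation:

	acpi_handle handle = DEVICE_ACPI_HANDLE(&pdev->dev);	/* NULL if no companion */
	unsigned long long gsb;

	if (handle && ACPI_SUCCESS(acpi_evaluate_integer(handle, "_GSB", NULL, &gsb)))
		gsi_base = (u32)gsb;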
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index e03fe98f0619..b2a448e19fe6 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -555,7 +555,7 @@ int pci_iov_init(struct pci_dev *dev)
555{ 555{
556 int pos; 556 int pos;
557 557
558 if (!dev->is_pcie) 558 if (!pci_is_pcie(dev))
559 return -ENODEV; 559 return -ENODEV;
560 560
561 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); 561 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 33317df47699..cc617ddd33d0 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -116,7 +116,7 @@ static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
116 int ret; 116 int ret;
117 117
118 ret = acpi_pm_device_sleep_wake(&bridge->dev, enable); 118 ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
119 if (!ret || bridge->is_pcie) 119 if (!ret || pci_is_pcie(bridge))
120 return; 120 return;
121 bus = bus->parent; 121 bus = bus->parent;
122 } 122 }
@@ -131,7 +131,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
131 if (acpi_pci_can_wakeup(dev)) 131 if (acpi_pci_can_wakeup(dev))
132 return acpi_pm_device_sleep_wake(&dev->dev, enable); 132 return acpi_pm_device_sleep_wake(&dev->dev, enable);
133 133
134 if (!dev->is_pcie) 134 if (!pci_is_pcie(dev))
135 acpi_pci_propagate_wakeup_enable(dev->bus, enable); 135 acpi_pci_propagate_wakeup_enable(dev->bus, enable);
136 136
137 return 0; 137 return 0;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 0f6382f090ee..c5df94e86678 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -74,7 +74,11 @@ static ssize_t local_cpus_show(struct device *dev,
74 const struct cpumask *mask; 74 const struct cpumask *mask;
75 int len; 75 int len;
76 76
77#ifdef CONFIG_NUMA
78 mask = cpumask_of_node(dev_to_node(dev));
79#else
77 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); 80 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
81#endif
78 len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask); 82 len = cpumask_scnprintf(buf, PAGE_SIZE-2, mask);
79 buf[len++] = '\n'; 83 buf[len++] = '\n';
80 buf[len] = '\0'; 84 buf[len] = '\0';
@@ -88,7 +92,11 @@ static ssize_t local_cpulist_show(struct device *dev,
88 const struct cpumask *mask; 92 const struct cpumask *mask;
89 int len; 93 int len;
90 94
95#ifdef CONFIG_NUMA
96 mask = cpumask_of_node(dev_to_node(dev));
97#else
91 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); 98 mask = cpumask_of_pcibus(to_pci_dev(dev)->bus);
99#endif
92 len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask); 100 len = cpulist_scnprintf(buf, PAGE_SIZE-2, mask);
93 buf[len++] = '\n'; 101 buf[len++] = '\n';
94 buf[len] = '\0'; 102 buf[len] = '\0';
@@ -176,6 +184,21 @@ numa_node_show(struct device *dev, struct device_attribute *attr, char *buf)
176#endif 184#endif
177 185
178static ssize_t 186static ssize_t
187dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf)
188{
189 struct pci_dev *pdev = to_pci_dev(dev);
190
191 return sprintf (buf, "%d\n", fls64(pdev->dma_mask));
192}
193
194static ssize_t
195consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr,
196 char *buf)
197{
198 return sprintf (buf, "%d\n", fls64(dev->coherent_dma_mask));
199}
200
201static ssize_t
179msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf) 202msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf)
180{ 203{
181 struct pci_dev *pdev = to_pci_dev(dev); 204 struct pci_dev *pdev = to_pci_dev(dev);
@@ -306,6 +329,8 @@ struct device_attribute pci_dev_attrs[] = {
306#ifdef CONFIG_NUMA 329#ifdef CONFIG_NUMA
307 __ATTR_RO(numa_node), 330 __ATTR_RO(numa_node),
308#endif 331#endif
332 __ATTR_RO(dma_mask_bits),
333 __ATTR_RO(consistent_dma_mask_bits),
309 __ATTR(enable, 0600, is_enabled_show, is_enabled_store), 334 __ATTR(enable, 0600, is_enabled_show, is_enabled_store),
310 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR), 335 __ATTR(broken_parity_status,(S_IRUGO|S_IWUSR),
311 broken_parity_status_show,broken_parity_status_store), 336 broken_parity_status_show,broken_parity_status_store),
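
The new dma_mask_bits/consistent_dma_mask_bits attributes report fls64() of the mask, i.e. the 1-based index of its highest set bit, so a mask built with DMA_BIT_MASK(n) reads back as n. A quick worked example (helper name hypothetical):

	#include <linux/dma-mapping.h>

	/* fls64(DMA_BIT_MASK(32)) == 32  (mask 0x00000000ffffffff)
	 * fls64(DMA_BIT_MASK(64)) == 64  (mask 0xffffffffffffffff)
	 * fls64(0)                ==  0  (no DMA mask set) */
	static int dma_mask_bits(u64 mask)
	{
		return fls64(mask);
	}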
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 4e4c295a049f..0bc27e059019 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -47,6 +47,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
47unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; 47unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
48unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; 48unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
49 49
50/*
51 * The default CLS is used if the arch didn't set CLS explicitly and not
52 * all PCI devices agree on the same value. The arch can override either
53 * the default or the actual value as it sees fit. Don't forget this is
54 * measured in 32-bit words, not bytes.
55 */
56u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
57u8 pci_cache_line_size;
58
50/** 59/**
51 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children 60 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
52 * @bus: pointer to PCI bus structure to search 61 * @bus: pointer to PCI bus structure to search
@@ -373,8 +382,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
373 continue; /* Wrong type */ 382 continue; /* Wrong type */
374 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH)) 383 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
375 return r; /* Exact match */ 384 return r; /* Exact match */
376 if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH)) 385 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
377 best = r; /* Approximating prefetchable by non-prefetchable */ 386 if (r->flags & IORESOURCE_PREFETCH)
387 continue;
388 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
389 if (!best)
390 best = r;
378 } 391 }
379 return best; 392 return best;
380} 393}
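
The rewritten matching loop in pci_find_parent_resource() encodes an asymmetric rule. Restated as a table:

	parent window      child resource     placement
	non-prefetchable   non-prefetchable   exact match, return it
	prefetchable       prefetchable       exact match, return it
	non-prefetchable   prefetchable       acceptable fallback; only
	                                      the prefetch optimization
	                                      is lost
	prefetchable       non-prefetchable   forbidden; the bridge may
	                                      prefetch from a region
	                                      whose reads have side
	                                      effects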
@@ -728,8 +741,8 @@ static int pci_save_pcie_state(struct pci_dev *dev)
728 u16 *cap; 741 u16 *cap;
729 u16 flags; 742 u16 flags;
730 743
731 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 744 pos = pci_pcie_cap(dev);
732 if (pos <= 0) 745 if (!pos)
733 return 0; 746 return 0;
734 747
735 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); 748 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -837,7 +850,7 @@ pci_save_state(struct pci_dev *dev)
837 int i; 850 int i;
838 /* XXX: 100% dword access ok here? */ 851 /* XXX: 100% dword access ok here? */
839 for (i = 0; i < 16; i++) 852 for (i = 0; i < 16; i++)
840 pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]); 853 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
841 dev->state_saved = true; 854 dev->state_saved = true;
842 if ((i = pci_save_pcie_state(dev)) != 0) 855 if ((i = pci_save_pcie_state(dev)) != 0)
843 return i; 856 return i;
@@ -1202,7 +1215,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
1202 1215
1203 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 1216 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1204 1217
1205 dev_printk(KERN_INFO, &dev->dev, "PME# %s\n", 1218 dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1206 enable ? "enabled" : "disabled"); 1219 enable ? "enabled" : "disabled");
1207} 1220}
1208 1221
@@ -1413,7 +1426,8 @@ void pci_pm_init(struct pci_dev *dev)
1413 1426
1414 pmc &= PCI_PM_CAP_PME_MASK; 1427 pmc &= PCI_PM_CAP_PME_MASK;
1415 if (pmc) { 1428 if (pmc) {
1416 dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", 1429 dev_printk(KERN_DEBUG, &dev->dev,
1430 "PME# supported from%s%s%s%s%s\n",
1417 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", 1431 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1418 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", 1432 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1419 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", 1433 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1510,7 +1524,7 @@ void pci_enable_ari(struct pci_dev *dev)
1510 u16 ctrl; 1524 u16 ctrl;
1511 struct pci_dev *bridge; 1525 struct pci_dev *bridge;
1512 1526
1513 if (!dev->is_pcie || dev->devfn) 1527 if (!pci_is_pcie(dev) || dev->devfn)
1514 return; 1528 return;
1515 1529
1516 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); 1530 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1518,10 +1532,10 @@ void pci_enable_ari(struct pci_dev *dev)
1518 return; 1532 return;
1519 1533
1520 bridge = dev->bus->self; 1534 bridge = dev->bus->self;
1521 if (!bridge || !bridge->is_pcie) 1535 if (!bridge || !pci_is_pcie(bridge))
1522 return; 1536 return;
1523 1537
1524 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); 1538 pos = pci_pcie_cap(bridge);
1525 if (!pos) 1539 if (!pos)
1526 return; 1540 return;
1527 1541
@@ -1536,6 +1550,54 @@ void pci_enable_ari(struct pci_dev *dev)
1536 bridge->ari_enabled = 1; 1550 bridge->ari_enabled = 1;
1537} 1551}
1538 1552
1553static int pci_acs_enable;
1554
1555/**
1556 * pci_request_acs - ask for ACS to be enabled if supported
1557 */
1558void pci_request_acs(void)
1559{
1560 pci_acs_enable = 1;
1561}
1562
1563/**
1564 * pci_enable_acs - enable ACS if the hardware supports it
1565 * @dev: the PCI device
1566 */
1567void pci_enable_acs(struct pci_dev *dev)
1568{
1569 int pos;
1570 u16 cap;
1571 u16 ctrl;
1572
1573 if (!pci_acs_enable)
1574 return;
1575
1576 if (!pci_is_pcie(dev))
1577 return;
1578
1579 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
1580 if (!pos)
1581 return;
1582
1583 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
1584 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
1585
1586 /* Source Validation */
1587 ctrl |= (cap & PCI_ACS_SV);
1588
1589 /* P2P Request Redirect */
1590 ctrl |= (cap & PCI_ACS_RR);
1591
1592 /* P2P Completion Redirect */
1593 ctrl |= (cap & PCI_ACS_CR);
1594
1595 /* Upstream Forwarding */
1596 ctrl |= (cap & PCI_ACS_UF);
1597
1598 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
1599}
1600
1539/** 1601/**
1540 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge 1602 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
1541 * @dev: the PCI device 1603 * @dev: the PCI device
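
pci_request_acs() is a one-way opt-in intended to be called before bus enumeration, typically by an IOMMU driver that needs peer-to-peer isolation; pci_enable_acs() then programs each ACS-capable PCIe device as it is discovered. A sketch of a caller (function name hypothetical):

	/* Hypothetical early-init hook of an IOMMU driver: request
	 * Source Validation, Request/Completion Redirect and Upstream
	 * Forwarding on every device that supports them. */
	static int __init my_iommu_early_init(void)
	{
		pci_request_acs();
		return 0;
	}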
@@ -1669,9 +1731,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
1669 return 0; 1731 return 0;
1670 1732
1671err_out: 1733err_out:
1672 dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n", 1734 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
1673 bar,
1674 pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
1675 &pdev->resource[bar]); 1735 &pdev->resource[bar]);
1676 return -EBUSY; 1736 return -EBUSY;
1677} 1737}
@@ -1866,31 +1926,6 @@ void pci_clear_master(struct pci_dev *dev)
1866 __pci_set_master(dev, false); 1926 __pci_set_master(dev, false);
1867} 1927}
1868 1928
1869#ifdef PCI_DISABLE_MWI
1870int pci_set_mwi(struct pci_dev *dev)
1871{
1872 return 0;
1873}
1874
1875int pci_try_set_mwi(struct pci_dev *dev)
1876{
1877 return 0;
1878}
1879
1880void pci_clear_mwi(struct pci_dev *dev)
1881{
1882}
1883
1884#else
1885
1886#ifndef PCI_CACHE_LINE_BYTES
1887#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
1888#endif
1889
1890/* This can be overridden by arch code. */
1891/* Don't forget this is measured in 32-bit words, not bytes */
1892u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
1893
1894/** 1929/**
1895 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed 1930 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
1896 * @dev: the PCI device for which MWI is to be enabled 1931 * @dev: the PCI device for which MWI is to be enabled
@@ -1901,13 +1936,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
1901 * 1936 *
1902 * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 1937 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
1903 */ 1938 */
1904static int 1939int pci_set_cacheline_size(struct pci_dev *dev)
1905pci_set_cacheline_size(struct pci_dev *dev)
1906{ 1940{
1907 u8 cacheline_size; 1941 u8 cacheline_size;
1908 1942
1909 if (!pci_cache_line_size) 1943 if (!pci_cache_line_size)
1910 return -EINVAL; /* The system doesn't support MWI. */ 1944 return -EINVAL;
1911 1945
1912 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be 1946 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
1913 equal to or multiple of the right value. */ 1947 equal to or multiple of the right value. */
@@ -1928,6 +1962,24 @@ pci_set_cacheline_size(struct pci_dev *dev)
1928 1962
1929 return -EINVAL; 1963 return -EINVAL;
1930} 1964}
1965EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
1966
1967#ifdef PCI_DISABLE_MWI
1968int pci_set_mwi(struct pci_dev *dev)
1969{
1970 return 0;
1971}
1972
1973int pci_try_set_mwi(struct pci_dev *dev)
1974{
1975 return 0;
1976}
1977
1978void pci_clear_mwi(struct pci_dev *dev)
1979{
1980}
1981
1982#else
1931 1983
1932/** 1984/**
1933 * pci_set_mwi - enables memory-write-invalidate PCI transaction 1985 * pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2062,6 +2114,7 @@ pci_set_dma_mask(struct pci_dev *dev, u64 mask)
2062 return -EIO; 2114 return -EIO;
2063 2115
2064 dev->dma_mask = mask; 2116 dev->dma_mask = mask;
2117 dev_dbg(&dev->dev, "using %dbit DMA mask\n", fls64(mask));
2065 2118
2066 return 0; 2119 return 0;
2067} 2120}
@@ -2073,6 +2126,7 @@ pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
2073 return -EIO; 2126 return -EIO;
2074 2127
2075 dev->dev.coherent_dma_mask = mask; 2128 dev->dev.coherent_dma_mask = mask;
2129 dev_dbg(&dev->dev, "using %dbit consistent DMA mask\n", fls64(mask));
2076 2130
2077 return 0; 2131 return 0;
2078} 2132}
@@ -2099,9 +2153,9 @@ static int pcie_flr(struct pci_dev *dev, int probe)
2099 int i; 2153 int i;
2100 int pos; 2154 int pos;
2101 u32 cap; 2155 u32 cap;
2102 u16 status; 2156 u16 status, control;
2103 2157
2104 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 2158 pos = pci_pcie_cap(dev);
2105 if (!pos) 2159 if (!pos)
2106 return -ENOTTY; 2160 return -ENOTTY;
2107 2161
@@ -2126,8 +2180,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
2126 "proceeding with reset anyway\n"); 2180 "proceeding with reset anyway\n");
2127 2181
2128clear: 2182clear:
2129 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, 2183 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2130 PCI_EXP_DEVCTL_BCR_FLR); 2184 control |= PCI_EXP_DEVCTL_BCR_FLR;
2185 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2186
2131 msleep(100); 2187 msleep(100);
2132 2188
2133 return 0; 2189 return 0;
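
The pcie_flr() change above turns a blind register write into a read-modify-write: the old code stored PCI_EXP_DEVCTL_BCR_FLR alone into Device Control, implicitly zeroing Max_Payload_Size, Max_Read_Request_Size and the error-reporting enables as a side effect of triggering the reset. The safe single-bit-set pattern, sketched:

	u16 ctl;

	/* preserve neighbouring Device Control fields */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &ctl);
	ctl |= PCI_EXP_DEVCTL_BCR_FLR;	/* initiate the reset */
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, ctl);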
@@ -2450,7 +2506,7 @@ int pcie_get_readrq(struct pci_dev *dev)
2450 int ret, cap; 2506 int ret, cap;
2451 u16 ctl; 2507 u16 ctl;
2452 2508
2453 cap = pci_find_capability(dev, PCI_CAP_ID_EXP); 2509 cap = pci_pcie_cap(dev);
2454 if (!cap) 2510 if (!cap)
2455 return -EINVAL; 2511 return -EINVAL;
2456 2512
@@ -2480,7 +2536,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
2480 2536
2481 v = (ffs(rq) - 8) << 12; 2537 v = (ffs(rq) - 8) << 12;
2482 2538
2483 cap = pci_find_capability(dev, PCI_CAP_ID_EXP); 2539 cap = pci_pcie_cap(dev);
2484 if (!cap) 2540 if (!cap)
2485 goto out; 2541 goto out;
2486 2542
@@ -2540,7 +2596,7 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
2540 return reg; 2596 return reg;
2541 } 2597 }
2542 2598
2543 dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno); 2599 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
2544 return 0; 2600 return 0;
2545} 2601}
2546 2602
@@ -2590,7 +2646,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
2590 2646
2591#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE 2647#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
2592static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0}; 2648static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
2593spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED; 2649static DEFINE_SPINLOCK(resource_alignment_lock);
2594 2650
2595/** 2651/**
2596 * pci_specified_resource_alignment - get resource alignment specified by user. 2652 * pci_specified_resource_alignment - get resource alignment specified by user.
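
The spinlock conversion at the end of this hunk is a correctness fix as well as style: SPIN_LOCK_UNLOCKED was deprecated because the bare initializer cannot carry a lockdep class key, and the old declaration also left the symbol non-static. DEFINE_SPINLOCK() declares and initializes in one step; usage is unchanged, sketched with a hypothetical accessor:

	static DEFINE_SPINLOCK(resource_alignment_lock);

	static void resource_alignment_set(const char *buf)
	{
		spin_lock(&resource_alignment_lock);
		strlcpy(resource_alignment_param, buf,
			RESOURCE_ALIGNMENT_PARAM_SIZE);
		spin_unlock(&resource_alignment_lock);
	}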
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d92d1954a2fb..33ed8e0aba1e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -311,4 +311,6 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
311 return resource_alignment(res); 311 return resource_alignment(res);
312} 312}
313 313
314extern void pci_enable_acs(struct pci_dev *dev);
315
314#endif /* DRIVERS_PCI_H */ 316#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 62d15f652bb6..7fcd5331b14c 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -23,6 +23,7 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/uaccess.h> 25#include <linux/uaccess.h>
26#include <linux/stddef.h>
26#include "aerdrv.h" 27#include "aerdrv.h"
27 28
28struct aer_error_inj { 29struct aer_error_inj {
@@ -35,10 +36,12 @@ struct aer_error_inj {
35 u32 header_log1; 36 u32 header_log1;
36 u32 header_log2; 37 u32 header_log2;
37 u32 header_log3; 38 u32 header_log3;
39 u16 domain;
38}; 40};
39 41
40struct aer_error { 42struct aer_error {
41 struct list_head list; 43 struct list_head list;
44 u16 domain;
42 unsigned int bus; 45 unsigned int bus;
43 unsigned int devfn; 46 unsigned int devfn;
44 int pos_cap_err; 47 int pos_cap_err;
@@ -66,22 +69,27 @@ static LIST_HEAD(pci_bus_ops_list);
66/* Protect einjected and pci_bus_ops_list */ 69/* Protect einjected and pci_bus_ops_list */
67static DEFINE_SPINLOCK(inject_lock); 70static DEFINE_SPINLOCK(inject_lock);
68 71
69static void aer_error_init(struct aer_error *err, unsigned int bus, 72static void aer_error_init(struct aer_error *err, u16 domain,
70 unsigned int devfn, int pos_cap_err) 73 unsigned int bus, unsigned int devfn,
74 int pos_cap_err)
71{ 75{
72 INIT_LIST_HEAD(&err->list); 76 INIT_LIST_HEAD(&err->list);
77 err->domain = domain;
73 err->bus = bus; 78 err->bus = bus;
74 err->devfn = devfn; 79 err->devfn = devfn;
75 err->pos_cap_err = pos_cap_err; 80 err->pos_cap_err = pos_cap_err;
76} 81}
77 82
78/* inject_lock must be held before calling */ 83/* inject_lock must be held before calling */
79static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn) 84static struct aer_error *__find_aer_error(u16 domain, unsigned int bus,
85 unsigned int devfn)
80{ 86{
81 struct aer_error *err; 87 struct aer_error *err;
82 88
83 list_for_each_entry(err, &einjected, list) { 89 list_for_each_entry(err, &einjected, list) {
84 if (bus == err->bus && devfn == err->devfn) 90 if (domain == err->domain &&
91 bus == err->bus &&
92 devfn == err->devfn)
85 return err; 93 return err;
86 } 94 }
87 return NULL; 95 return NULL;
@@ -90,7 +98,10 @@ static struct aer_error *__find_aer_error(unsigned int bus, unsigned int devfn)
90/* inject_lock must be held before calling */ 98/* inject_lock must be held before calling */
91static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev) 99static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
92{ 100{
93 return __find_aer_error(dev->bus->number, dev->devfn); 101 int domain = pci_domain_nr(dev->bus);
102 if (domain < 0)
103 return NULL;
104 return __find_aer_error((u16)domain, dev->bus->number, dev->devfn);
94} 105}
95 106
96/* inject_lock must be held before calling */ 107/* inject_lock must be held before calling */
@@ -172,11 +183,15 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
172 struct aer_error *err; 183 struct aer_error *err;
173 unsigned long flags; 184 unsigned long flags;
174 struct pci_ops *ops; 185 struct pci_ops *ops;
186 int domain;
175 187
176 spin_lock_irqsave(&inject_lock, flags); 188 spin_lock_irqsave(&inject_lock, flags);
177 if (size != sizeof(u32)) 189 if (size != sizeof(u32))
178 goto out; 190 goto out;
179 err = __find_aer_error(bus->number, devfn); 191 domain = pci_domain_nr(bus);
192 if (domain < 0)
193 goto out;
194 err = __find_aer_error((u16)domain, bus->number, devfn);
180 if (!err) 195 if (!err)
181 goto out; 196 goto out;
182 197
@@ -200,11 +215,15 @@ int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where, int size,
200 unsigned long flags; 215 unsigned long flags;
201 int rw1cs; 216 int rw1cs;
202 struct pci_ops *ops; 217 struct pci_ops *ops;
218 int domain;
203 219
204 spin_lock_irqsave(&inject_lock, flags); 220 spin_lock_irqsave(&inject_lock, flags);
205 if (size != sizeof(u32)) 221 if (size != sizeof(u32))
206 goto out; 222 goto out;
207 err = __find_aer_error(bus->number, devfn); 223 domain = pci_domain_nr(bus);
224 if (domain < 0)
225 goto out;
226 err = __find_aer_error((u16)domain, bus->number, devfn);
208 if (!err) 227 if (!err)
209 goto out; 228 goto out;
210 229
@@ -262,7 +281,7 @@ out:
262static struct pci_dev *pcie_find_root_port(struct pci_dev *dev) 281static struct pci_dev *pcie_find_root_port(struct pci_dev *dev)
263{ 282{
264 while (1) { 283 while (1) {
265 if (!dev->is_pcie) 284 if (!pci_is_pcie(dev))
266 break; 285 break;
267 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) 286 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
268 return dev; 287 return dev;
@@ -305,25 +324,25 @@ static int aer_inject(struct aer_error_inj *einj)
305 u32 sever; 324 u32 sever;
306 int ret = 0; 325 int ret = 0;
307 326
308 dev = pci_get_bus_and_slot(einj->bus, devfn); 327 dev = pci_get_domain_bus_and_slot((int)einj->domain, einj->bus, devfn);
309 if (!dev) 328 if (!dev)
310 return -EINVAL; 329 return -ENODEV;
311 rpdev = pcie_find_root_port(dev); 330 rpdev = pcie_find_root_port(dev);
312 if (!rpdev) { 331 if (!rpdev) {
313 ret = -EINVAL; 332 ret = -ENOTTY;
314 goto out_put; 333 goto out_put;
315 } 334 }
316 335
317 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 336 pos_cap_err = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
318 if (!pos_cap_err) { 337 if (!pos_cap_err) {
319 ret = -EIO; 338 ret = -ENOTTY;
320 goto out_put; 339 goto out_put;
321 } 340 }
322 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever); 341 pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
323 342
324 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR); 343 rp_pos_cap_err = pci_find_ext_capability(rpdev, PCI_EXT_CAP_ID_ERR);
325 if (!rp_pos_cap_err) { 344 if (!rp_pos_cap_err) {
326 ret = -EIO; 345 ret = -ENOTTY;
327 goto out_put; 346 goto out_put;
328 } 347 }
329 348
@@ -344,7 +363,8 @@ static int aer_inject(struct aer_error_inj *einj)
344 if (!err) { 363 if (!err) {
345 err = err_alloc; 364 err = err_alloc;
346 err_alloc = NULL; 365 err_alloc = NULL;
347 aer_error_init(err, einj->bus, devfn, pos_cap_err); 366 aer_error_init(err, einj->domain, einj->bus, devfn,
367 pos_cap_err);
348 list_add(&err->list, &einjected); 368 list_add(&err->list, &einjected);
349 } 369 }
350 err->uncor_status |= einj->uncor_status; 370 err->uncor_status |= einj->uncor_status;
@@ -358,7 +378,8 @@ static int aer_inject(struct aer_error_inj *einj)
358 if (!rperr) { 378 if (!rperr) {
359 rperr = rperr_alloc; 379 rperr = rperr_alloc;
360 rperr_alloc = NULL; 380 rperr_alloc = NULL;
361 aer_error_init(rperr, rpdev->bus->number, rpdev->devfn, 381 aer_error_init(rperr, pci_domain_nr(rpdev->bus),
382 rpdev->bus->number, rpdev->devfn,
362 rp_pos_cap_err); 383 rp_pos_cap_err);
363 list_add(&rperr->list, &einjected); 384 list_add(&rperr->list, &einjected);
364 } 385 }
@@ -411,10 +432,11 @@ static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
411 432
412 if (!capable(CAP_SYS_ADMIN)) 433 if (!capable(CAP_SYS_ADMIN))
413 return -EPERM; 434 return -EPERM;
414 435 if (usize < offsetof(struct aer_error_inj, domain) ||
415 if (usize != sizeof(struct aer_error_inj)) 436 usize > sizeof(einj))
416 return -EINVAL; 437 return -EINVAL;
417 438
439 memset(&einj, 0, sizeof(einj));
418 if (copy_from_user(&einj, ubuf, usize)) 440 if (copy_from_user(&einj, ubuf, usize))
419 return -EFAULT; 441 return -EFAULT;
420 442
@@ -452,7 +474,7 @@ static void __exit aer_inject_exit(void)
452 } 474 }
453 475
454 spin_lock_irqsave(&inject_lock, flags); 476 spin_lock_irqsave(&inject_lock, flags);
455 list_for_each_entry_safe(err, err_next, &pci_bus_ops_list, list) { 477 list_for_each_entry_safe(err, err_next, &einjected, list) {
456 list_del(&err->list); 478 list_del(&err->list);
457 kfree(err); 479 kfree(err);
458 } 480 }
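
aer_inject's write() handler now accepts both record layouts: any size from offsetof(struct aer_error_inj, domain), where the old struct ended and the new domain field begins, up to sizeof(einj) is valid, and the memset() zeroes whatever the caller did not supply, so injections from old userspace land in PCI domain 0. The general pattern for growing a binary interface, sketched with hypothetical names:

	struct record {
		u32 old_a, old_b;
		u16 new_field;	/* appended in a later version */
	};

	static ssize_t record_write(const char __user *ubuf, size_t usize)
	{
		struct record rec;

		if (usize < offsetof(struct record, new_field) ||
		    usize > sizeof(rec))
			return -EINVAL;	/* neither layout fits */

		memset(&rec, 0, sizeof(rec));	/* absent fields read as 0 */
		if (copy_from_user(&rec, ubuf, usize))
			return -EFAULT;
		/* ... act on rec ... */
		return usize;
	}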
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 40c3cc5d1caf..97a345927b55 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -53,7 +53,7 @@ static struct pci_error_handlers aer_error_handlers = {
53 53
54static struct pcie_port_service_driver aerdriver = { 54static struct pcie_port_service_driver aerdriver = {
55 .name = "aer", 55 .name = "aer",
56 .port_type = PCIE_RC_PORT, 56 .port_type = PCI_EXP_TYPE_ROOT_PORT,
57 .service = PCIE_PORT_SERVICE_AER, 57 .service = PCIE_PORT_SERVICE_AER,
58 58
59 .probe = aer_probe, 59 .probe = aer_probe,
@@ -295,7 +295,7 @@ static void aer_error_resume(struct pci_dev *dev)
295 u16 reg16; 295 u16 reg16;
296 296
297 /* Clean up Root device status */ 297 /* Clean up Root device status */
298 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 298 pos = pci_pcie_cap(dev);
299 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16); 299 pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &reg16);
300 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); 300 pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16);
301 301
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9f5ccbeb4fa5..ae672ca80333 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -35,11 +35,14 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
35 u16 reg16 = 0; 35 u16 reg16 = 0;
36 int pos; 36 int pos;
37 37
38 if (dev->aer_firmware_first)
39 return -EIO;
40
38 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 41 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
39 if (!pos) 42 if (!pos)
40 return -EIO; 43 return -EIO;
41 44
42 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 45 pos = pci_pcie_cap(dev);
43 if (!pos) 46 if (!pos)
44 return -EIO; 47 return -EIO;
45 48
@@ -60,7 +63,10 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
60 u16 reg16 = 0; 63 u16 reg16 = 0;
61 int pos; 64 int pos;
62 65
63 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 66 if (dev->aer_firmware_first)
67 return -EIO;
68
69 pos = pci_pcie_cap(dev);
64 if (!pos) 70 if (!pos)
65 return -EIO; 71 return -EIO;
66 72
@@ -78,48 +84,27 @@ EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
78int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) 84int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
79{ 85{
80 int pos; 86 int pos;
81 u32 status, mask; 87 u32 status;
82 88
83 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 89 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
84 if (!pos) 90 if (!pos)
85 return -EIO; 91 return -EIO;
86 92
87 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); 93 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
88 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); 94 if (status)
89 if (dev->error_state == pci_channel_io_normal) 95 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
90 status &= ~mask; /* Clear corresponding nonfatal bits */
91 else
92 status &= mask; /* Clear corresponding fatal bits */
93 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
94 96
95 return 0; 97 return 0;
96} 98}
97EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); 99EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
98 100
99#if 0
100int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
101{
102 int pos;
103 u32 status;
104
105 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
106 if (!pos)
107 return -EIO;
108
109 pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
110 pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
111
112 return 0;
113}
114#endif /* 0 */
115
116static int set_device_error_reporting(struct pci_dev *dev, void *data) 101static int set_device_error_reporting(struct pci_dev *dev, void *data)
117{ 102{
118 bool enable = *((bool *)data); 103 bool enable = *((bool *)data);
119 104
120 if (dev->pcie_type == PCIE_RC_PORT || 105 if ((dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) ||
121 dev->pcie_type == PCIE_SW_UPSTREAM_PORT || 106 (dev->pcie_type == PCI_EXP_TYPE_UPSTREAM) ||
122 dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) { 107 (dev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)) {
123 if (enable) 108 if (enable)
124 pci_enable_pcie_error_reporting(dev); 109 pci_enable_pcie_error_reporting(dev);
125 else 110 else
@@ -218,7 +203,7 @@ static int find_device_iter(struct pci_dev *dev, void *data)
218 */ 203 */
219 if (atomic_read(&dev->enable_cnt) == 0) 204 if (atomic_read(&dev->enable_cnt) == 0)
220 return 0; 205 return 0;
221 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 206 pos = pci_pcie_cap(dev);
222 if (!pos) 207 if (!pos)
223 return 0; 208 return 0;
224 /* Check if AER is enabled */ 209 /* Check if AER is enabled */
@@ -431,10 +416,9 @@ static int find_aer_service_iter(struct device *device, void *data)
431 result = (struct find_aer_service_data *) data; 416 result = (struct find_aer_service_data *) data;
432 417
433 if (device->bus == &pcie_port_bus_type) { 418 if (device->bus == &pcie_port_bus_type) {
434 struct pcie_port_data *port_data; 419 struct pcie_device *pcie = to_pcie_device(device);
435 420
436 port_data = pci_get_drvdata(to_pcie_device(device)->port); 421 if (pcie->port->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
437 if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
438 result->is_downstream = 1; 422 result->is_downstream = 1;
439 423
440 driver = device->driver; 424 driver = device->driver;
@@ -612,7 +596,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
612 u16 reg16; 596 u16 reg16;
613 u32 reg32; 597 u32 reg32;
614 598
615 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 599 pos = pci_pcie_cap(pdev);
616 /* Clear PCIE Capability's Device Status */ 600 /* Clear PCIE Capability's Device Status */
617 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16); 601 pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
618 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16); 602 pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
@@ -874,8 +858,22 @@ void aer_delete_rootport(struct aer_rpc *rpc)
874 */ 858 */
875int aer_init(struct pcie_device *dev) 859int aer_init(struct pcie_device *dev)
876{ 860{
877 if (aer_osc_setup(dev) && !forceload) 861 if (dev->port->aer_firmware_first) {
878 return -ENXIO; 862 dev_printk(KERN_DEBUG, &dev->device,
863 "PCIe errors handled by platform firmware.\n");
864 goto out;
865 }
866
867 if (aer_osc_setup(dev))
868 goto out;
879 869
880 return 0; 870 return 0;
871out:
872 if (forceload) {
873 dev_printk(KERN_DEBUG, &dev->device,
874 "aerdrv forceload requested.\n");
875 dev->port->aer_firmware_first = 0;
876 return 0;
877 }
878 return -ENXIO;
881} 879}
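
The aer_firmware_first checks added above gate every OS-side AER entry point: when platform firmware declares first-notice ownership of a device's error registers (via the ACPI HEST table, hence the <acpi/acpi_hest.h> include in probe.c below), the kernel must not flip the reporting enables behind its back. The guard pattern, sketched with a hypothetical helper:

	/* Return 0 only when the OS may touch this device's AER
	 * registers; firmware-first devices are off limits. */
	static int aer_os_may_touch(struct pci_dev *dev)
	{
		if (dev->aer_firmware_first)
			return -EIO;
		if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
			return -EIO;	/* no AER capability at all */
		return 0;
	}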
diff --git a/drivers/pci/pcie/aer/ecrc.c b/drivers/pci/pcie/aer/ecrc.c
index a928d8ab6bda..a2747a663bc9 100644
--- a/drivers/pci/pcie/aer/ecrc.c
+++ b/drivers/pci/pcie/aer/ecrc.c
@@ -51,7 +51,7 @@ static int enable_ecrc_checking(struct pci_dev *dev)
51 int pos; 51 int pos;
52 u32 reg32; 52 u32 reg32;
53 53
54 if (!dev->is_pcie) 54 if (!pci_is_pcie(dev))
55 return -ENODEV; 55 return -ENODEV;
56 56
57 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 57 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -79,7 +79,7 @@ static int disable_ecrc_checking(struct pci_dev *dev)
79 int pos; 79 int pos;
80 u32 reg32; 80 u32 reg32;
81 81
82 if (!dev->is_pcie) 82 if (!pci_is_pcie(dev))
83 return -ENODEV; 83 return -ENODEV;
84 84
85 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 85 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5b7056cec00c..5a01fc7fbf05 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -122,7 +122,7 @@ static void pcie_set_clkpm_nocheck(struct pcie_link_state *link, int enable)
122 struct pci_bus *linkbus = link->pdev->subordinate; 122 struct pci_bus *linkbus = link->pdev->subordinate;
123 123
124 list_for_each_entry(child, &linkbus->devices, bus_list) { 124 list_for_each_entry(child, &linkbus->devices, bus_list) {
125 pos = pci_find_capability(child, PCI_CAP_ID_EXP); 125 pos = pci_pcie_cap(child);
126 if (!pos) 126 if (!pos)
127 return; 127 return;
128 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16); 128 pci_read_config_word(child, pos + PCI_EXP_LNKCTL, &reg16);
@@ -156,7 +156,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
156 156
157 /* All functions should have the same cap and state, take the worst */ 157 /* All functions should have the same cap and state, take the worst */
158 list_for_each_entry(child, &linkbus->devices, bus_list) { 158 list_for_each_entry(child, &linkbus->devices, bus_list) {
159 pos = pci_find_capability(child, PCI_CAP_ID_EXP); 159 pos = pci_pcie_cap(child);
160 if (!pos) 160 if (!pos)
161 return; 161 return;
162 pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32); 162 pci_read_config_dword(child, pos + PCI_EXP_LNKCAP, &reg32);
@@ -191,23 +191,23 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
191 * Configuration, so just check one function 191 * Configuration, so just check one function
192 */ 192 */
193 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list); 193 child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
194 BUG_ON(!child->is_pcie); 194 BUG_ON(!pci_is_pcie(child));
195 195
196 /* Check downstream component if bit Slot Clock Configuration is 1 */ 196 /* Check downstream component if bit Slot Clock Configuration is 1 */
197 cpos = pci_find_capability(child, PCI_CAP_ID_EXP); 197 cpos = pci_pcie_cap(child);
198 pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16); 198 pci_read_config_word(child, cpos + PCI_EXP_LNKSTA, &reg16);
199 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 199 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
200 same_clock = 0; 200 same_clock = 0;
201 201
202 /* Check upstream component if bit Slot Clock Configuration is 1 */ 202 /* Check upstream component if bit Slot Clock Configuration is 1 */
203 ppos = pci_find_capability(parent, PCI_CAP_ID_EXP); 203 ppos = pci_pcie_cap(parent);
204 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16); 204 pci_read_config_word(parent, ppos + PCI_EXP_LNKSTA, &reg16);
205 if (!(reg16 & PCI_EXP_LNKSTA_SLC)) 205 if (!(reg16 & PCI_EXP_LNKSTA_SLC))
206 same_clock = 0; 206 same_clock = 0;
207 207
208 /* Configure downstream component, all functions */ 208 /* Configure downstream component, all functions */
209 list_for_each_entry(child, &linkbus->devices, bus_list) { 209 list_for_each_entry(child, &linkbus->devices, bus_list) {
210 cpos = pci_find_capability(child, PCI_CAP_ID_EXP); 210 cpos = pci_pcie_cap(child);
211 pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16); 211 pci_read_config_word(child, cpos + PCI_EXP_LNKCTL, &reg16);
212 child_reg[PCI_FUNC(child->devfn)] = reg16; 212 child_reg[PCI_FUNC(child->devfn)] = reg16;
213 if (same_clock) 213 if (same_clock)
@@ -247,7 +247,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
247 dev_printk(KERN_ERR, &parent->dev, 247 dev_printk(KERN_ERR, &parent->dev,
248 "ASPM: Could not configure common clock\n"); 248 "ASPM: Could not configure common clock\n");
249 list_for_each_entry(child, &linkbus->devices, bus_list) { 249 list_for_each_entry(child, &linkbus->devices, bus_list) {
250 cpos = pci_find_capability(child, PCI_CAP_ID_EXP); 250 cpos = pci_pcie_cap(child);
251 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL, 251 pci_write_config_word(child, cpos + PCI_EXP_LNKCTL,
252 child_reg[PCI_FUNC(child->devfn)]); 252 child_reg[PCI_FUNC(child->devfn)]);
253 } 253 }
@@ -300,7 +300,7 @@ static void pcie_get_aspm_reg(struct pci_dev *pdev,
300 u16 reg16; 300 u16 reg16;
301 u32 reg32; 301 u32 reg32;
302 302
303 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 303 pos = pci_pcie_cap(pdev);
304 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32); 304 pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
305 info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10; 305 info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
306 info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12; 306 info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
@@ -420,7 +420,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
420 child->pcie_type != PCI_EXP_TYPE_LEG_END) 420 child->pcie_type != PCI_EXP_TYPE_LEG_END)
421 continue; 421 continue;
422 422
423 pos = pci_find_capability(child, PCI_CAP_ID_EXP); 423 pos = pci_pcie_cap(child);
424 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32); 424 pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
425 /* Calculate endpoint L0s acceptable latency */ 425 /* Calculate endpoint L0s acceptable latency */
426 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6; 426 encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
@@ -436,7 +436,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
436static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) 436static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
437{ 437{
438 u16 reg16; 438 u16 reg16;
439 int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 439 int pos = pci_pcie_cap(pdev);
440 440
441 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16); 441 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
442 reg16 &= ~0x3; 442 reg16 &= ~0x3;
@@ -503,7 +503,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
503 * very strange. Disable ASPM for the whole slot 503 * very strange. Disable ASPM for the whole slot
504 */ 504 */
505 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { 505 list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
506 pos = pci_find_capability(child, PCI_CAP_ID_EXP); 506 pos = pci_pcie_cap(child);
507 if (!pos) 507 if (!pos)
508 return -EINVAL; 508 return -EINVAL;
509 /* 509 /*
@@ -563,7 +563,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
563 struct pcie_link_state *link; 563 struct pcie_link_state *link;
564 int blacklist = !!pcie_aspm_sanity_check(pdev); 564 int blacklist = !!pcie_aspm_sanity_check(pdev);
565 565
566 if (aspm_disabled || !pdev->is_pcie || pdev->link_state) 566 if (aspm_disabled || !pci_is_pcie(pdev) || pdev->link_state)
567 return; 567 return;
568 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 568 if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
569 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) 569 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
@@ -629,7 +629,8 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
629 struct pci_dev *parent = pdev->bus->self; 629 struct pci_dev *parent = pdev->bus->self;
630 struct pcie_link_state *link, *root, *parent_link; 630 struct pcie_link_state *link, *root, *parent_link;
631 631
632 if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state) 632 if (aspm_disabled || !pci_is_pcie(pdev) ||
633 !parent || !parent->link_state)
633 return; 634 return;
634 if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 635 if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
635 (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 636 (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -670,7 +671,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
670{ 671{
671 struct pcie_link_state *link = pdev->link_state; 672 struct pcie_link_state *link = pdev->link_state;
672 673
673 if (aspm_disabled || !pdev->is_pcie || !link) 674 if (aspm_disabled || !pci_is_pcie(pdev) || !link)
674 return; 675 return;
675 if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) && 676 if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
676 (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)) 677 (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -696,7 +697,7 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
696 struct pci_dev *parent = pdev->bus->self; 697 struct pci_dev *parent = pdev->bus->self;
697 struct pcie_link_state *link; 698 struct pcie_link_state *link;
698 699
699 if (aspm_disabled || !pdev->is_pcie) 700 if (aspm_disabled || !pci_is_pcie(pdev))
700 return; 701 return;
701 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT || 702 if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
702 pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM) 703 pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
@@ -841,8 +842,9 @@ void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev)
841{ 842{
842 struct pcie_link_state *link_state = pdev->link_state; 843 struct pcie_link_state *link_state = pdev->link_state;
843 844
844 if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 845 if (!pci_is_pcie(pdev) ||
845 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 846 (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
847 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
846 return; 848 return;
847 849
848 if (link_state->aspm_support) 850 if (link_state->aspm_support)
@@ -857,8 +859,9 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
857{ 859{
858 struct pcie_link_state *link_state = pdev->link_state; 860 struct pcie_link_state *link_state = pdev->link_state;
859 861
860 if (!pdev->is_pcie || (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT && 862 if (!pci_is_pcie(pdev) ||
861 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state) 863 (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
864 pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM) || !link_state)
862 return; 865 return;
863 866
864 if (link_state->aspm_support) 867 if (link_state->aspm_support)
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 17ad53868f9f..aaeb9d21cba5 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -11,31 +11,16 @@
11 11
12#include <linux/compiler.h> 12#include <linux/compiler.h>
13 13
14#if !defined(PCI_CAP_ID_PME) 14#define PCIE_PORT_DEVICE_MAXSERVICES 4
15#define PCI_CAP_ID_PME 1
16#endif
17
18#if !defined(PCI_CAP_ID_EXP)
19#define PCI_CAP_ID_EXP 0x10
20#endif
21
22#define PORT_TYPE_MASK 0xf
23#define PORT_TO_SLOT_MASK 0x100
24#define SLOT_HP_CAPABLE_MASK 0x40
25#define PCIE_CAPABILITIES_REG 0x2
26#define PCIE_SLOT_CAPABILITIES_REG 0x14
27#define PCIE_PORT_DEVICE_MAXSERVICES 4
28#define PCIE_PORT_MSI_VECTOR_MASK 0x1f
29/* 15/*
30 * According to the PCI Express Base Specification 2.0, the indices of the MSI-X 16 * According to the PCI Express Base Specification 2.0, the indices of
31 * table entries used by port services must not exceed 31 17 * the MSI-X table entries used by port services must not exceed 31
32 */ 18 */
33#define PCIE_PORT_MAX_MSIX_ENTRIES 32 19#define PCIE_PORT_MAX_MSIX_ENTRIES 32
34 20
35#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 21#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
36 22
37extern struct bus_type pcie_port_bus_type; 23extern struct bus_type pcie_port_bus_type;
38extern int pcie_port_device_probe(struct pci_dev *dev);
39extern int pcie_port_device_register(struct pci_dev *dev); 24extern int pcie_port_device_register(struct pci_dev *dev);
40#ifdef CONFIG_PM 25#ifdef CONFIG_PM
41extern int pcie_port_device_suspend(struct device *dev); 26extern int pcie_port_device_suspend(struct device *dev);
diff --git a/drivers/pci/pcie/portdrv_bus.c b/drivers/pci/pcie/portdrv_bus.c
index ef3a4eeaebb4..18bf90f748f6 100644
--- a/drivers/pci/pcie/portdrv_bus.c
+++ b/drivers/pci/pcie/portdrv_bus.c
@@ -26,7 +26,6 @@ EXPORT_SYMBOL_GPL(pcie_port_bus_type);
26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) 26static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
27{ 27{
28 struct pcie_device *pciedev; 28 struct pcie_device *pciedev;
29 struct pcie_port_data *port_data;
30 struct pcie_port_service_driver *driver; 29 struct pcie_port_service_driver *driver;
31 30
32 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) 31 if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type)
@@ -38,10 +37,8 @@ static int pcie_port_bus_match(struct device *dev, struct device_driver *drv)
38 if (driver->service != pciedev->service) 37 if (driver->service != pciedev->service)
39 return 0; 38 return 0;
40 39
41 port_data = pci_get_drvdata(pciedev->port); 40 if ((driver->port_type != PCIE_ANY_PORT) &&
42 41 (driver->port_type != pciedev->port->pcie_type))
43 if (driver->port_type != PCIE_ANY_PORT
44 && driver->port_type != port_data->port_type)
45 return 0; 42 return 0;
46 43
47 return 1; 44 return 1;
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 52f84fca9f7d..413262eb95b7 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -108,9 +108,9 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
108 * the value in this field indicates which MSI-X Table entry is 108 * the value in this field indicates which MSI-X Table entry is
109 * used to generate the interrupt message." 109 * used to generate the interrupt message."
110 */ 110 */
111 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 111 pos = pci_pcie_cap(dev);
112 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16); 112 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
113 entry = (reg16 >> 9) & PCIE_PORT_MSI_VECTOR_MASK; 113 entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
114 if (entry >= nr_entries) 114 if (entry >= nr_entries)
115 goto Error; 115 goto Error;
116 116
@@ -177,37 +177,40 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
177} 177}
178 178
179/** 179/**
180 * assign_interrupt_mode - choose interrupt mode for PCI Express port services 180 * init_service_irqs - initialize irqs for PCI Express port services
181 * (INTx, MSI-X, MSI) and set up vectors
182 * @dev: PCI Express port to handle 181 * @dev: PCI Express port to handle
183 * @vectors: Array of interrupt vectors to populate 182 * @irqs: Array of irqs to populate
184 * @mask: Bitmask of port capabilities returned by get_port_device_capability() 183 * @mask: Bitmask of port capabilities returned by get_port_device_capability()
185 * 184 *
186 * Return value: Interrupt mode associated with the port 185 * Return value: Interrupt mode associated with the port
187 */ 186 */
188static int assign_interrupt_mode(struct pci_dev *dev, int *vectors, int mask) 187static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
189{ 188{
190 int irq, interrupt_mode = PCIE_PORT_NO_IRQ; 189 int i, irq;
191 int i;
192 190
193 /* Try to use MSI-X if supported */ 191 /* Try to use MSI-X if supported */
194 if (!pcie_port_enable_msix(dev, vectors, mask)) 192 if (!pcie_port_enable_msix(dev, irqs, mask))
195 return PCIE_PORT_MSIX_MODE; 193 return 0;
196
197 /* We're not going to use MSI-X, so try MSI and fall back to INTx */ 194 /* We're not going to use MSI-X, so try MSI and fall back to INTx */
198 if (!pci_enable_msi(dev)) 195 irq = -1;
199 interrupt_mode = PCIE_PORT_MSI_MODE; 196 if (!pci_enable_msi(dev) || dev->pin)
200 197 irq = dev->irq;
201 if (interrupt_mode == PCIE_PORT_NO_IRQ && dev->pin)
202 interrupt_mode = PCIE_PORT_INTx_MODE;
203 198
204 irq = interrupt_mode != PCIE_PORT_NO_IRQ ? dev->irq : -1;
205 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) 199 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
206 vectors[i] = irq; 200 irqs[i] = irq;
201 irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
207 202
208 vectors[PCIE_PORT_SERVICE_VC_SHIFT] = -1; 203 if (irq < 0)
204 return -ENODEV;
205 return 0;
206}
209 207
210 return interrupt_mode; 208static void cleanup_service_irqs(struct pci_dev *dev)
209{
210 if (dev->msix_enabled)
211 pci_disable_msix(dev);
212 else if (dev->msi_enabled)
213 pci_disable_msi(dev);
211} 214}
212 215
213/** 216/**
@@ -226,13 +229,12 @@ static int get_port_device_capability(struct pci_dev *dev)
226 u16 reg16; 229 u16 reg16;
227 u32 reg32; 230 u32 reg32;
228 231
229 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 232 pos = pci_pcie_cap(dev);
230 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg16); 233 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
231 /* Hot-Plug Capable */ 234 /* Hot-Plug Capable */
232 if (reg16 & PORT_TO_SLOT_MASK) { 235 if (reg16 & PCI_EXP_FLAGS_SLOT) {
233 pci_read_config_dword(dev, 236 pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32);
234 pos + PCIE_SLOT_CAPABILITIES_REG, &reg32); 237 if (reg32 & PCI_EXP_SLTCAP_HPC)
235 if (reg32 & SLOT_HP_CAPABLE_MASK)
236 services |= PCIE_PORT_SERVICE_HP; 238 services |= PCIE_PORT_SERVICE_HP;
237 } 239 }
238 /* AER capable */ 240 /* AER capable */
@@ -241,80 +243,47 @@ static int get_port_device_capability(struct pci_dev *dev)
241 /* VC support */ 243 /* VC support */
242 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) 244 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC))
243 services |= PCIE_PORT_SERVICE_VC; 245 services |= PCIE_PORT_SERVICE_VC;
246 /* Root ports are capable of generating PME too */
247 if (dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT)
248 services |= PCIE_PORT_SERVICE_PME;
244 249
245 return services; 250 return services;
246} 251}
247 252
248/** 253/**
249 * pcie_device_init - initialize PCI Express port service device 254 * pcie_device_init - allocate and initialize PCI Express port service device
250 * @dev: Port service device to initialize 255 * @pdev: PCI Express port to associate the service device with
251 * @parent: PCI Express port to associate the service device with 256 * @service: Type of service to associate with the service device
252 * @port_type: Type of the port
253 * @service_type: Type of service to associate with the service device
254 * @irq: Interrupt vector to associate with the service device 257 * @irq: Interrupt vector to associate with the service device
255 */ 258 */
256static void pcie_device_init(struct pci_dev *parent, struct pcie_device *dev, 259static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
257 int service_type, int irq)
258{ 260{
259 struct pcie_port_data *port_data = pci_get_drvdata(parent); 261 int retval;
262 struct pcie_device *pcie;
260 struct device *device; 263 struct device *device;
261 int port_type = port_data->port_type;
262 264
263 dev->port = parent; 265 pcie = kzalloc(sizeof(*pcie), GFP_KERNEL);
264 dev->irq = irq; 266 if (!pcie)
265 dev->service = service_type; 267 return -ENOMEM;
268 pcie->port = pdev;
269 pcie->irq = irq;
270 pcie->service = service;
266 271
267 /* Initialize generic device interface */ 272 /* Initialize generic device interface */
268 device = &dev->device; 273 device = &pcie->device;
269 memset(device, 0, sizeof(struct device));
270 device->bus = &pcie_port_bus_type; 274 device->bus = &pcie_port_bus_type;
271 device->driver = NULL;
272 dev_set_drvdata(device, NULL);
273 device->release = release_pcie_device; /* callback to free pcie dev */ 275 device->release = release_pcie_device; /* callback to free pcie dev */
274 dev_set_name(device, "%s:pcie%02x", 276 dev_set_name(device, "%s:pcie%02x",
275 pci_name(parent), get_descriptor_id(port_type, service_type)); 277 pci_name(pdev),
276 device->parent = &parent->dev; 278 get_descriptor_id(pdev->pcie_type, service));
277} 279 device->parent = &pdev->dev;
278 280
279/** 281 retval = device_register(device);
280 * alloc_pcie_device - allocate PCI Express port service device structure 282 if (retval)
281 * @parent: PCI Express port to associate the service device with 283 kfree(pcie);
282 * @port_type: Type of the port 284 else
283 * @service_type: Type of service to associate with the service device 285 get_device(device);
284 * @irq: Interrupt vector to associate with the service device 286 return retval;
285 */
286static struct pcie_device* alloc_pcie_device(struct pci_dev *parent,
287 int service_type, int irq)
288{
289 struct pcie_device *device;
290
291 device = kzalloc(sizeof(struct pcie_device), GFP_KERNEL);
292 if (!device)
293 return NULL;
294
295 pcie_device_init(parent, device, service_type, irq);
296 return device;
297}
298
299/**
300 * pcie_port_device_probe - check if device is a PCI Express port
301 * @dev: Device to check
302 */
303int pcie_port_device_probe(struct pci_dev *dev)
304{
305 int pos, type;
306 u16 reg;
307
308 if (!(pos = pci_find_capability(dev, PCI_CAP_ID_EXP)))
309 return -ENODEV;
310
311 pci_read_config_word(dev, pos + PCIE_CAPABILITIES_REG, &reg);
312 type = (reg >> 4) & PORT_TYPE_MASK;
313 if ( type == PCIE_RC_PORT || type == PCIE_SW_UPSTREAM_PORT ||
314 type == PCIE_SW_DOWNSTREAM_PORT )
315 return 0;
316
317 return -ENODEV;
318} 287}
319 288
320/** 289/**
@@ -326,77 +295,49 @@ int pcie_port_device_probe(struct pci_dev *dev)
326 */ 295 */
327int pcie_port_device_register(struct pci_dev *dev) 296int pcie_port_device_register(struct pci_dev *dev)
328{ 297{
329 struct pcie_port_data *port_data; 298 int status, capabilities, i, nr_service;
330 int status, capabilities, irq_mode, i, nr_serv; 299 int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
331 int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
332 u16 reg16;
333
334 port_data = kzalloc(sizeof(*port_data), GFP_KERNEL);
335 if (!port_data)
336 return -ENOMEM;
337 pci_set_drvdata(dev, port_data);
338
339 /* Get port type */
340 pci_read_config_word(dev,
341 pci_find_capability(dev, PCI_CAP_ID_EXP) +
342 PCIE_CAPABILITIES_REG, &reg16);
343 port_data->port_type = (reg16 >> 4) & PORT_TYPE_MASK;
344 300
301 /* Get and check PCI Express port services */
345 capabilities = get_port_device_capability(dev); 302 capabilities = get_port_device_capability(dev);
346 /* Root ports are capable of generating PME too */ 303 if (!capabilities)
347 if (port_data->port_type == PCIE_RC_PORT) 304 return -ENODEV;
348 capabilities |= PCIE_PORT_SERVICE_PME;
349
350 irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
351 if (irq_mode == PCIE_PORT_NO_IRQ) {
352 /*
353 * Don't use service devices that require interrupts if there is
354 * no way to generate them.
355 */
356 if (!(capabilities & PCIE_PORT_SERVICE_VC)) {
357 status = -ENODEV;
358 goto Error;
359 }
360 capabilities = PCIE_PORT_SERVICE_VC;
361 }
362 port_data->port_irq_mode = irq_mode;
363 305
306 /* Enable PCI Express port device */
364 status = pci_enable_device(dev); 307 status = pci_enable_device(dev);
365 if (status) 308 if (status)
366 goto Error; 309 return status;
367 pci_set_master(dev); 310 pci_set_master(dev);
311 /*
312 * Initialize service irqs. Don't use service devices that
313 * require interrupts if there is no way to generate them.
314 */
315 status = init_service_irqs(dev, irqs, capabilities);
316 if (status) {
317 capabilities &= PCIE_PORT_SERVICE_VC;
318 if (!capabilities)
319 goto error_disable;
320 }
368 321
369 /* Allocate child services if any */ 322 /* Allocate child services if any */
370 for (i = 0, nr_serv = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 323 status = -ENODEV;
371 struct pcie_device *child; 324 nr_service = 0;
325 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
372 int service = 1 << i; 326 int service = 1 << i;
373
374 if (!(capabilities & service)) 327 if (!(capabilities & service))
375 continue; 328 continue;
376 329 if (!pcie_device_init(dev, service, irqs[i]))
377 child = alloc_pcie_device(dev, service, vectors[i]); 330 nr_service++;
378 if (!child)
379 continue;
380
381 status = device_register(&child->device);
382 if (status) {
383 kfree(child);
384 continue;
385 }
386
387 get_device(&child->device);
388 nr_serv++;
389 }
390 if (!nr_serv) {
391 pci_disable_device(dev);
392 status = -ENODEV;
393 goto Error;
394 } 331 }
332 if (!nr_service)
333 goto error_cleanup_irqs;
395 334
396 return 0; 335 return 0;
397 336
398 Error: 337error_cleanup_irqs:
399 kfree(port_data); 338 cleanup_service_irqs(dev);
339error_disable:
340 pci_disable_device(dev);
400 return status; 341 return status;
401} 342}
402 343
@@ -464,21 +405,9 @@ static int remove_iter(struct device *dev, void *data)
464 */ 405 */
465void pcie_port_device_remove(struct pci_dev *dev) 406void pcie_port_device_remove(struct pci_dev *dev)
466{ 407{
467 struct pcie_port_data *port_data = pci_get_drvdata(dev);
468
469 device_for_each_child(&dev->dev, NULL, remove_iter); 408 device_for_each_child(&dev->dev, NULL, remove_iter);
409 cleanup_service_irqs(dev);
470 pci_disable_device(dev); 410 pci_disable_device(dev);
471
472 switch (port_data->port_irq_mode) {
473 case PCIE_PORT_MSIX_MODE:
474 pci_disable_msix(dev);
475 break;
476 case PCIE_PORT_MSI_MODE:
477 pci_disable_msi(dev);
478 break;
479 }
480
481 kfree(port_data);
482} 411}
483 412
484/** 413/**
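
init_service_irqs() folds the old three-way interrupt_mode bookkeeping into the irqs[] array itself. Restated cleanly, the fallback chain is: MSI-X first (potentially one vector per service), then MSI, then legacy INTx (both sharing a single vector), and -1 when nothing can fire, in which case only the interrupt-less VC service survives. A sketch:

	int i, irq = -1;

	if (!pcie_port_enable_msix(dev, irqs, mask))
		return 0;		/* per-service vectors filled in */

	if (!pci_enable_msi(dev) || dev->pin)
		irq = dev->irq;		/* one shared MSI or INTx vector */

	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
		irqs[i] = irq;
	irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;	/* VC never needs one */

	return irq < 0 ? -ENODEV : 0;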
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index f635e476d632..ce52ea34fee5 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -67,14 +67,16 @@ static struct dev_pm_ops pcie_portdrv_pm_ops = {
67 * this port device. 67 * this port device.
68 * 68 *
69 */ 69 */
70static int __devinit pcie_portdrv_probe (struct pci_dev *dev, 70static int __devinit pcie_portdrv_probe(struct pci_dev *dev,
71 const struct pci_device_id *id ) 71 const struct pci_device_id *id)
72{ 72{
73 int status; 73 int status;
74 74
75 status = pcie_port_device_probe(dev); 75 if (!pci_is_pcie(dev) ||
76 if (status) 76 ((dev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
77 return status; 77 (dev->pcie_type != PCI_EXP_TYPE_UPSTREAM) &&
78 (dev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)))
79 return -ENODEV;
78 80
79 if (!dev->irq && dev->pin) { 81 if (!dev->irq && dev->pin) {
80 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " 82 dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; "
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 8105e32117f6..98ffb2de22e9 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/cpumask.h> 11#include <linux/cpumask.h>
12#include <linux/pci-aspm.h> 12#include <linux/pci-aspm.h>
13#include <acpi/acpi_hest.h>
13#include "pci.h" 14#include "pci.h"
14 15
15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 16#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -163,12 +164,12 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
163{ 164{
164 u32 l, sz, mask; 165 u32 l, sz, mask;
165 166
166 mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0; 167 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
167 168
168 res->name = pci_name(dev); 169 res->name = pci_name(dev);
169 170
170 pci_read_config_dword(dev, pos, &l); 171 pci_read_config_dword(dev, pos, &l);
171 pci_write_config_dword(dev, pos, mask); 172 pci_write_config_dword(dev, pos, l | mask);
172 pci_read_config_dword(dev, pos, &sz); 173 pci_read_config_dword(dev, pos, &sz);
173 pci_write_config_dword(dev, pos, l); 174 pci_write_config_dword(dev, pos, l);
174 175
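
Two things happen in this sizing hunk: the mask for ROM BARs switches to PCI_ROM_ADDRESS_MASK, and the probe write becomes l | mask so the ROM enable bit already present in l survives the sizing cycle. For reference, the classic decode of the read-back value, sketched (helper name hypothetical, edge cases omitted):

	/* The device hardwires the address bits it does not decode;
	 * after writing all-ones, the lowest bit that sticks gives the
	 * decode granularity. E.g. a 1 MB BAR reads back 0xfff00000:
	 * lowest set bit 0x00100000, extent 0x000fffff. */
	static u64 bar_extent(u64 readback, u64 mask)
	{
		u64 size = readback & mask;	/* address bits only */

		if (!size)
			return 0;
		return (size & ~(size - 1)) - 1;
	}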
@@ -223,9 +224,13 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
223 goto fail; 224 goto fail;
224 225
225 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { 226 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
226 dev_err(&dev->dev, "can't handle 64-bit BAR\n"); 227 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
228 pos);
227 goto fail; 229 goto fail;
228 } else if ((sizeof(resource_size_t) < 8) && l) { 230 }
231
232 res->flags |= IORESOURCE_MEM_64;
233 if ((sizeof(resource_size_t) < 8) && l) {
229 /* Address above 32-bit boundary; disable the BAR */ 234 /* Address above 32-bit boundary; disable the BAR */
230 pci_write_config_dword(dev, pos, 0); 235 pci_write_config_dword(dev, pos, 0);
231 pci_write_config_dword(dev, pos + 4, 0); 236 pci_write_config_dword(dev, pos + 4, 0);
@@ -234,14 +239,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
234 } else { 239 } else {
235 res->start = l64; 240 res->start = l64;
236 res->end = l64 + sz64; 241 res->end = l64 + sz64;
237 dev_printk(KERN_DEBUG, &dev->dev, 242 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
238 "reg %x %s: %pR\n", pos, 243 pos, res);
239 (res->flags & IORESOURCE_PREFETCH) ?
240 "64bit mmio pref" : "64bit mmio",
241 res);
242 } 244 }
243
244 res->flags |= IORESOURCE_MEM_64;
245 } else { 245 } else {
246 sz = pci_size(l, sz, mask); 246 sz = pci_size(l, sz, mask);
247 247
@@ -251,11 +251,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
251 res->start = l; 251 res->start = l;
252 res->end = l + sz; 252 res->end = l + sz;
253 253
254 dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, 254 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
255 (res->flags & IORESOURCE_IO) ? "io port" :
256 ((res->flags & IORESOURCE_PREFETCH) ?
257 "32bit mmio pref" : "32bit mmio"),
258 res);
259 } 255 }
260 256
261 out: 257 out:
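The sizing sequence above (save the BAR, write all-ones through the mask, read back, restore) yields a value whose lowest set address bit gives the region size. A standalone sketch of the arithmetic, assuming a memory BAR readback; bar_size() is a simplified stand-in for the kernel's pci_size():

#include <stdio.h>
#include <stdint.h>

#define PCI_BASE_ADDRESS_MEM_MASK (~0x0fUL)	/* low 4 bits are flag bits */

/* Size of a BAR from the value read back after writing all-ones. */
static uint64_t bar_size(uint64_t readback, uint64_t mask)
{
	uint64_t sz = readback & mask;
	if (!sz)
		return 0;		/* BAR not implemented */
	return sz & ~(sz - 1);		/* isolate the lowest set address bit */
}

int main(void)
{
	/* Example: a readback of 0xffff0000 decodes to a 64 KB region. */
	printf("size = %#llx\n", (unsigned long long)
	       bar_size(0xffff0000, PCI_BASE_ADDRESS_MEM_MASK));
	return 0;
}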
@@ -297,8 +293,11 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
297 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ 293 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
298 return; 294 return;
299 295
296 dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
297 child->secondary, child->subordinate,
 298 dev->transparent ? " (subtractive decode)" : "");

299
300 if (dev->transparent) { 300 if (dev->transparent) {
301 dev_info(&dev->dev, "transparent bridge\n");
302 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++) 301 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
303 child->resource[i] = child->parent->resource[i - 3]; 302 child->resource[i] = child->parent->resource[i - 3];
304 } 303 }
@@ -323,7 +322,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
323 res->start = base; 322 res->start = base;
324 if (!res->end) 323 if (!res->end)
325 res->end = limit + 0xfff; 324 res->end = limit + 0xfff;
326 dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res); 325 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
327 } 326 }
328 327
329 res = child->resource[1]; 328 res = child->resource[1];
@@ -335,8 +334,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
335 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; 334 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
336 res->start = base; 335 res->start = base;
337 res->end = limit + 0xfffff; 336 res->end = limit + 0xfffff;
338 dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n", 337 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
339 res);
340 } 338 }
341 339
342 res = child->resource[2]; 340 res = child->resource[2];
@@ -375,9 +373,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
375 res->flags |= IORESOURCE_MEM_64; 373 res->flags |= IORESOURCE_MEM_64;
376 res->start = base; 374 res->start = base;
377 res->end = limit + 0xfffff; 375 res->end = limit + 0xfffff;
378 dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", 376 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
379 (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
380 res);
381 } 377 }
382} 378}
383 379
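For the bridge windows logged above, the I/O base/limit registers encode only address bits 15:12 in one byte each; pci_read_bridge_bases() expands them as sketched below (a runnable model of the arithmetic, per the PCI-to-PCI bridge spec):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t io_base_lo = 0x10, io_limit_lo = 0x20;	/* example readback */

	/* Register bits 7:4 hold address bits 15:12; the low 12 bits of
	 * the base are implicitly zero, those of the limit all ones. */
	uint32_t base  = (io_base_lo  & 0xf0) << 8;
	uint32_t limit = ((io_limit_lo & 0xf0) << 8) | 0xfff;

	printf("bridge window [io %#06x-%#06x]\n", base, limit);
	return 0;
}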
@@ -651,13 +647,14 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
651 (child->number > bus->subordinate) || 647 (child->number > bus->subordinate) ||
652 (child->number < bus->number) || 648 (child->number < bus->number) ||
653 (child->subordinate < bus->number)) { 649 (child->subordinate < bus->number)) {
654 pr_debug("PCI: Bus #%02x (-#%02x) is %s " 650 dev_info(&child->dev, "[bus %02x-%02x] %s "
655 "hidden behind%s bridge #%02x (-#%02x)\n", 651 "hidden behind%s bridge %s [bus %02x-%02x]\n",
656 child->number, child->subordinate, 652 child->number, child->subordinate,
657 (bus->number > child->subordinate && 653 (bus->number > child->subordinate &&
658 bus->subordinate < child->number) ? 654 bus->subordinate < child->number) ?
659 "wholly" : "partially", 655 "wholly" : "partially",
660 bus->self->transparent ? " transparent" : "", 656 bus->self->transparent ? " transparent" : "",
657 dev_name(&bus->dev),
661 bus->number, bus->subordinate); 658 bus->number, bus->subordinate);
662 } 659 }
663 bus = bus->parent; 660 bus = bus->parent;
@@ -693,6 +690,7 @@ static void set_pcie_port_type(struct pci_dev *pdev)
693 if (!pos) 690 if (!pos)
694 return; 691 return;
695 pdev->is_pcie = 1; 692 pdev->is_pcie = 1;
693 pdev->pcie_cap = pos;
696 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 694 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
697 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; 695 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
698} 696}
@@ -703,7 +701,7 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
703 u16 reg16; 701 u16 reg16;
704 u32 reg32; 702 u32 reg32;
705 703
706 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 704 pos = pci_pcie_cap(pdev);
707 if (!pos) 705 if (!pos)
708 return; 706 return;
709 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); 707 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
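pci_pcie_cap(), substituted above for repeated pci_find_capability() calls, just returns the offset that set_pcie_port_type() cached at enumeration time. A minimal sketch of the caching idiom; the struct and lookup are reduced stand-ins for the kernel types:

#include <stdio.h>

struct pci_dev_stub {
	int pcie_cap;	/* capability offset cached once, 0 if absent */
};

/* Expensive capability walk, done once; a stub returning a fixed offset. */
static int find_capability(struct pci_dev_stub *dev) { (void)dev; return 0x60; }

/* Cheap accessor used everywhere else, mirroring pci_pcie_cap(). */
static int pcie_cap(struct pci_dev_stub *dev) { return dev->pcie_cap; }

int main(void)
{
	struct pci_dev_stub dev = { 0 };
	dev.pcie_cap = find_capability(&dev);	/* set_pcie_port_type() analogue */
	printf("PCIe capability at %#x\n", pcie_cap(&dev));
	return 0;
}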
@@ -714,6 +712,12 @@ static void set_pcie_hotplug_bridge(struct pci_dev *pdev)
714 pdev->is_hotplug_bridge = 1; 712 pdev->is_hotplug_bridge = 1;
715} 713}
716 714
715static void set_pci_aer_firmware_first(struct pci_dev *pdev)
716{
717 if (acpi_hest_firmware_first_pci(pdev))
718 pdev->aer_firmware_first = 1;
719}
720
717#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 721#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
718 722
719/** 723/**
@@ -731,6 +735,7 @@ int pci_setup_device(struct pci_dev *dev)
731 u32 class; 735 u32 class;
732 u8 hdr_type; 736 u8 hdr_type;
733 struct pci_slot *slot; 737 struct pci_slot *slot;
738 int pos = 0;
734 739
735 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type)) 740 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
736 return -EIO; 741 return -EIO;
@@ -742,6 +747,7 @@ int pci_setup_device(struct pci_dev *dev)
742 dev->multifunction = !!(hdr_type & 0x80); 747 dev->multifunction = !!(hdr_type & 0x80);
743 dev->error_state = pci_channel_io_normal; 748 dev->error_state = pci_channel_io_normal;
744 set_pcie_port_type(dev); 749 set_pcie_port_type(dev);
750 set_pci_aer_firmware_first(dev);
745 751
746 list_for_each_entry(slot, &dev->bus->slots, list) 752 list_for_each_entry(slot, &dev->bus->slots, list)
747 if (PCI_SLOT(dev->devfn) == slot->number) 753 if (PCI_SLOT(dev->devfn) == slot->number)
@@ -822,6 +828,11 @@ int pci_setup_device(struct pci_dev *dev)
822 dev->transparent = ((dev->class & 0xff) == 1); 828 dev->transparent = ((dev->class & 0xff) == 1);
823 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); 829 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
824 set_pcie_hotplug_bridge(dev); 830 set_pcie_hotplug_bridge(dev);
831 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
832 if (pos) {
833 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
834 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
835 }
825 break; 836 break;
826 837
827 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ 838 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
@@ -907,7 +918,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
907 if (class == PCI_CLASS_BRIDGE_HOST) 918 if (class == PCI_CLASS_BRIDGE_HOST)
908 return pci_cfg_space_size_ext(dev); 919 return pci_cfg_space_size_ext(dev);
909 920
910 pos = pci_find_capability(dev, PCI_CAP_ID_EXP); 921 pos = pci_pcie_cap(dev);
911 if (!pos) { 922 if (!pos) {
912 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); 923 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
913 if (!pos) 924 if (!pos)
@@ -1014,6 +1025,9 @@ static void pci_init_capabilities(struct pci_dev *dev)
1014 1025
1015 /* Single Root I/O Virtualization */ 1026 /* Single Root I/O Virtualization */
1016 pci_iov_init(dev); 1027 pci_iov_init(dev);
1028
1029 /* Enable ACS P2P upstream forwarding */
1030 pci_enable_acs(dev);
1017} 1031}
1018 1032
1019void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) 1033void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
@@ -1110,7 +1124,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1110 unsigned int devfn, pass, max = bus->secondary; 1124 unsigned int devfn, pass, max = bus->secondary;
1111 struct pci_dev *dev; 1125 struct pci_dev *dev;
1112 1126
1113 pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number); 1127 dev_dbg(&bus->dev, "scanning bus\n");
1114 1128
1115 /* Go find them, Rover! */ 1129 /* Go find them, Rover! */
1116 for (devfn = 0; devfn < 0x100; devfn += 8) 1130 for (devfn = 0; devfn < 0x100; devfn += 8)
@@ -1124,8 +1138,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1124 * all PCI-to-PCI bridges on this bus. 1138 * all PCI-to-PCI bridges on this bus.
1125 */ 1139 */
1126 if (!bus->is_added) { 1140 if (!bus->is_added) {
1127 pr_debug("PCI: Fixups for bus %04x:%02x\n", 1141 dev_dbg(&bus->dev, "fixups for bus\n");
1128 pci_domain_nr(bus), bus->number);
1129 pcibios_fixup_bus(bus); 1142 pcibios_fixup_bus(bus);
1130 if (pci_is_root_bus(bus)) 1143 if (pci_is_root_bus(bus))
1131 bus->is_added = 1; 1144 bus->is_added = 1;
@@ -1145,8 +1158,7 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1145 * 1158 *
1146 * Return how far we've got finding sub-buses. 1159 * Return how far we've got finding sub-buses.
1147 */ 1160 */
1148 pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n", 1161 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1149 pci_domain_nr(bus), bus->number, max);
1150 return max; 1162 return max;
1151} 1163}
1152 1164
@@ -1154,7 +1166,7 @@ struct pci_bus * pci_create_bus(struct device *parent,
1154 int bus, struct pci_ops *ops, void *sysdata) 1166 int bus, struct pci_ops *ops, void *sysdata)
1155{ 1167{
1156 int error; 1168 int error;
1157 struct pci_bus *b; 1169 struct pci_bus *b, *b2;
1158 struct device *dev; 1170 struct device *dev;
1159 1171
1160 b = pci_alloc_bus(); 1172 b = pci_alloc_bus();
@@ -1170,9 +1182,10 @@ struct pci_bus * pci_create_bus(struct device *parent,
1170 b->sysdata = sysdata; 1182 b->sysdata = sysdata;
1171 b->ops = ops; 1183 b->ops = ops;
1172 1184
1173 if (pci_find_bus(pci_domain_nr(b), bus)) { 1185 b2 = pci_find_bus(pci_domain_nr(b), bus);
1186 if (b2) {
1174 /* If we already got to this bus through a different bridge, ignore it */ 1187 /* If we already got to this bus through a different bridge, ignore it */
1175 pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus); 1188 dev_dbg(&b2->dev, "bus already known\n");
1176 goto err_out; 1189 goto err_out;
1177 } 1190 }
1178 1191
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 245d2cdb4765..7cfa7c38d318 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -357,7 +357,7 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
357 pcibios_bus_to_resource(dev, res, &bus_region); 357 pcibios_bus_to_resource(dev, res, &bus_region);
358 358
359 pci_claim_resource(dev, nr); 359 pci_claim_resource(dev, nr);
360 dev_info(&dev->dev, "quirk: region %04x-%04x claimed by %s\n", region, region + size - 1, name); 360 dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
361 } 361 }
362} 362}
363 363
@@ -1680,6 +1680,7 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_
1680 */ 1680 */
1681#define AMD_813X_MISC 0x40 1681#define AMD_813X_MISC 0x40
1682#define AMD_813X_NOIOAMODE (1<<0) 1682#define AMD_813X_NOIOAMODE (1<<0)
1683#define AMD_813X_REV_B1 0x12
1683#define AMD_813X_REV_B2 0x13 1684#define AMD_813X_REV_B2 0x13
1684 1685
1685static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) 1686static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
@@ -1688,7 +1689,8 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1688 1689
1689 if (noioapicquirk) 1690 if (noioapicquirk)
1690 return; 1691 return;
1691 if (dev->revision == AMD_813X_REV_B2) 1692 if ((dev->revision == AMD_813X_REV_B1) ||
1693 (dev->revision == AMD_813X_REV_B2))
1692 return; 1694 return;
1693 1695
1694 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); 1696 pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
@@ -1698,8 +1700,10 @@ static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
1698 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n", 1700 dev_info(&dev->dev, "disabled boot interrupts on device [%04x:%04x]\n",
1699 dev->vendor, dev->device); 1701 dev->vendor, dev->device);
1700} 1702}
1701DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); 1703DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1702DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); 1704DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1705DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1706DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
1703 1707
1704#define AMD_8111_PCI_IRQ_ROUTING 0x56 1708#define AMD_8111_PCI_IRQ_ROUTING 0x56
1705 1709
@@ -2595,9 +2599,37 @@ void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
2595static int __init pci_apply_final_quirks(void) 2599static int __init pci_apply_final_quirks(void)
2596{ 2600{
2597 struct pci_dev *dev = NULL; 2601 struct pci_dev *dev = NULL;
2602 u8 cls = 0;
2603 u8 tmp;
2604
2605 if (pci_cache_line_size)
2606 printk(KERN_DEBUG "PCI: CLS %u bytes\n",
2607 pci_cache_line_size << 2);
2598 2608
2599 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 2609 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2600 pci_fixup_device(pci_fixup_final, dev); 2610 pci_fixup_device(pci_fixup_final, dev);
2611 /*
2612 * If arch hasn't set it explicitly yet, use the CLS
2613 * value shared by all PCI devices. If there's a
2614 * mismatch, fall back to the default value.
2615 */
2616 if (!pci_cache_line_size) {
2617 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
2618 if (!cls)
2619 cls = tmp;
2620 if (!tmp || cls == tmp)
2621 continue;
2622
2623 printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), "
2624 "using %u bytes\n", cls << 2, tmp << 2,
2625 pci_dfl_cache_line_size << 2);
2626 pci_cache_line_size = pci_dfl_cache_line_size;
2627 }
2628 }
2629 if (!pci_cache_line_size) {
2630 printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
2631 cls << 2, pci_dfl_cache_line_size << 2);
2632 pci_cache_line_size = cls;
2601 } 2633 }
2602 2634
2603 return 0; 2635 return 0;
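The final-quirk pass above derives a system-wide cache line size: if every device that has one programmed agrees, that value is adopted, and any mismatch falls back to the architecture default. A runnable model of the decision, assuming the architecture left pci_cache_line_size unset (values are in 4-byte words, as in the config register):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example CLS readbacks from three devices (0 = unprogrammed). */
	uint8_t devs[] = { 16, 16, 8 };
	uint8_t dfl = 16;	/* pci_dfl_cache_line_size analogue */
	uint8_t cls = 0;
	unsigned i;

	for (i = 0; i < sizeof(devs); i++) {
		if (!cls)
			cls = devs[i];
		if (devs[i] && cls != devs[i]) {	/* mismatch: use default */
			cls = dfl;
			break;
		}
	}
	printf("CLS %u bytes\n", cls << 2);	/* register counts 4-byte words */
	return 0;
}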
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index ec415352d9ba..6dae87143258 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -26,14 +26,14 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
26{ 26{
27 struct pci_dev *tmp = NULL; 27 struct pci_dev *tmp = NULL;
28 28
29 if (pdev->is_pcie) 29 if (pci_is_pcie(pdev))
30 return NULL; 30 return NULL;
31 while (1) { 31 while (1) {
32 if (pci_is_root_bus(pdev->bus)) 32 if (pci_is_root_bus(pdev->bus))
33 break; 33 break;
34 pdev = pdev->bus->self; 34 pdev = pdev->bus->self;
35 /* a p2p bridge */ 35 /* a p2p bridge */
36 if (!pdev->is_pcie) { 36 if (!pci_is_pcie(pdev)) {
37 tmp = pdev; 37 tmp = pdev;
38 continue; 38 continue;
39 } 39 }
@@ -149,32 +149,33 @@ struct pci_dev * pci_get_slot(struct pci_bus *bus, unsigned int devfn)
149} 149}
150 150
151/** 151/**
152 * pci_get_bus_and_slot - locate PCI device from a given PCI bus & slot 152 * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot
153 * @bus: number of PCI bus on which desired PCI device resides 153 * @domain: PCI domain/segment on which the PCI device resides.
154 * @devfn: encodes number of PCI slot in which the desired PCI 154 * @bus: PCI bus on which desired PCI device resides
155 * device resides and the logical device number within that slot 155 * @devfn: encodes number of PCI slot in which the desired PCI device
156 * in case of multi-function devices. 156 * resides and the logical device number within that slot in case of
157 * 157 * multi-function devices.
158 * Note: the bus/slot search is limited to PCI domain (segment) 0.
159 * 158 *
160 * Given a PCI bus and slot/function number, the desired PCI device 159 * Given a PCI domain, bus, and slot/function number, the desired PCI
161 * is located in system global list of PCI devices. If the device 160 * device is located in the list of PCI devices. If the device is
162 * is found, a pointer to its data structure is returned. If no 161 * found, its reference count is increased and this function returns a
163 * device is found, %NULL is returned. The returned device has its 162 * pointer to its data structure. The caller must decrement the
164 * reference count bumped by one. 163 * reference count by calling pci_dev_put(). If no device is found,
164 * %NULL is returned.
165 */ 165 */
166 166struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
167struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn) 167 unsigned int devfn)
168{ 168{
169 struct pci_dev *dev = NULL; 169 struct pci_dev *dev = NULL;
170 170
171 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 171 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
172 if (pci_domain_nr(dev->bus) == 0 && 172 if (pci_domain_nr(dev->bus) == domain &&
173 (dev->bus->number == bus && dev->devfn == devfn)) 173 (dev->bus->number == bus && dev->devfn == devfn))
174 return dev; 174 return dev;
175 } 175 }
176 return NULL; 176 return NULL;
177} 177}
178EXPORT_SYMBOL(pci_get_domain_bus_and_slot);
178 179
179static int match_pci_dev_by_id(struct device *dev, void *data) 180static int match_pci_dev_by_id(struct device *dev, void *data)
180{ 181{
@@ -354,5 +355,4 @@ EXPORT_SYMBOL(pci_find_next_bus);
354EXPORT_SYMBOL(pci_get_device); 355EXPORT_SYMBOL(pci_get_device);
355EXPORT_SYMBOL(pci_get_subsys); 356EXPORT_SYMBOL(pci_get_subsys);
356EXPORT_SYMBOL(pci_get_slot); 357EXPORT_SYMBOL(pci_get_slot);
357EXPORT_SYMBOL(pci_get_bus_and_slot);
358EXPORT_SYMBOL(pci_get_class); 358EXPORT_SYMBOL(pci_get_class);
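As the reworked kerneldoc notes, the lookup bumps the device's reference count, which the caller must drop. A hedged usage sketch (kernel context assumed; the domain, bus, and devfn values are purely illustrative):

/* Sketch only: look up 0000:00:1f.0, use it, then drop the reference. */
struct pci_dev *pdev;

pdev = pci_get_domain_bus_and_slot(0, 0x00, PCI_DEVFN(0x1f, 0));
if (pdev) {
	dev_info(&pdev->dev, "found device\n");
	pci_dev_put(pdev);	/* balance the reference taken by the lookup */
}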
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index cb1a027eb552..c48cd377b3f5 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -71,53 +71,50 @@ static void pbus_assign_resources_sorted(const struct pci_bus *bus)
71void pci_setup_cardbus(struct pci_bus *bus) 71void pci_setup_cardbus(struct pci_bus *bus)
72{ 72{
73 struct pci_dev *bridge = bus->self; 73 struct pci_dev *bridge = bus->self;
74 struct resource *res;
74 struct pci_bus_region region; 75 struct pci_bus_region region;
75 76
76 dev_info(&bridge->dev, "CardBus bridge, secondary bus %04x:%02x\n", 77 dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
77 pci_domain_nr(bus), bus->number); 78 bus->secondary, bus->subordinate);
78 79
79 pcibios_resource_to_bus(bridge, &region, bus->resource[0]); 80 res = bus->resource[0];
80 if (bus->resource[0]->flags & IORESOURCE_IO) { 81 pcibios_resource_to_bus(bridge, &region, res);
82 if (res->flags & IORESOURCE_IO) {
81 /* 83 /*
82 * The IO resource is allocated a range twice as large as it 84 * The IO resource is allocated a range twice as large as it
83 * would normally need. This allows us to set both IO regs. 85 * would normally need. This allows us to set both IO regs.
84 */ 86 */
85 dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", 87 dev_info(&bridge->dev, " bridge window %pR\n", res);
86 (unsigned long)region.start,
87 (unsigned long)region.end);
88 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, 88 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
89 region.start); 89 region.start);
90 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0, 90 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
91 region.end); 91 region.end);
92 } 92 }
93 93
94 pcibios_resource_to_bus(bridge, &region, bus->resource[1]); 94 res = bus->resource[1];
95 if (bus->resource[1]->flags & IORESOURCE_IO) { 95 pcibios_resource_to_bus(bridge, &region, res);
96 dev_info(&bridge->dev, " IO window: %#08lx-%#08lx\n", 96 if (res->flags & IORESOURCE_IO) {
97 (unsigned long)region.start, 97 dev_info(&bridge->dev, " bridge window %pR\n", res);
98 (unsigned long)region.end);
99 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, 98 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
100 region.start); 99 region.start);
101 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1, 100 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
102 region.end); 101 region.end);
103 } 102 }
104 103
105 pcibios_resource_to_bus(bridge, &region, bus->resource[2]); 104 res = bus->resource[2];
106 if (bus->resource[2]->flags & IORESOURCE_MEM) { 105 pcibios_resource_to_bus(bridge, &region, res);
107 dev_info(&bridge->dev, " PREFETCH window: %#08lx-%#08lx\n", 106 if (res->flags & IORESOURCE_MEM) {
108 (unsigned long)region.start, 107 dev_info(&bridge->dev, " bridge window %pR\n", res);
109 (unsigned long)region.end);
110 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, 108 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
111 region.start); 109 region.start);
112 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0, 110 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
113 region.end); 111 region.end);
114 } 112 }
115 113
116 pcibios_resource_to_bus(bridge, &region, bus->resource[3]); 114 res = bus->resource[3];
117 if (bus->resource[3]->flags & IORESOURCE_MEM) { 115 pcibios_resource_to_bus(bridge, &region, res);
118 dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", 116 if (res->flags & IORESOURCE_MEM) {
119 (unsigned long)region.start, 117 dev_info(&bridge->dev, " bridge window %pR\n", res);
120 (unsigned long)region.end);
121 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, 118 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
122 region.start); 119 region.start);
123 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1, 120 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
@@ -140,34 +137,33 @@ EXPORT_SYMBOL(pci_setup_cardbus);
140static void pci_setup_bridge(struct pci_bus *bus) 137static void pci_setup_bridge(struct pci_bus *bus)
141{ 138{
142 struct pci_dev *bridge = bus->self; 139 struct pci_dev *bridge = bus->self;
140 struct resource *res;
143 struct pci_bus_region region; 141 struct pci_bus_region region;
144 u32 l, bu, lu, io_upper16; 142 u32 l, bu, lu, io_upper16;
145 int pref_mem64;
146 143
147 if (pci_is_enabled(bridge)) 144 if (pci_is_enabled(bridge))
148 return; 145 return;
149 146
150 dev_info(&bridge->dev, "PCI bridge, secondary bus %04x:%02x\n", 147 dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
151 pci_domain_nr(bus), bus->number); 148 bus->secondary, bus->subordinate);
152 149
153 /* Set up the top and bottom of the PCI I/O segment for this bus. */ 150 /* Set up the top and bottom of the PCI I/O segment for this bus. */
154 pcibios_resource_to_bus(bridge, &region, bus->resource[0]); 151 res = bus->resource[0];
155 if (bus->resource[0]->flags & IORESOURCE_IO) { 152 pcibios_resource_to_bus(bridge, &region, res);
153 if (res->flags & IORESOURCE_IO) {
156 pci_read_config_dword(bridge, PCI_IO_BASE, &l); 154 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
157 l &= 0xffff0000; 155 l &= 0xffff0000;
158 l |= (region.start >> 8) & 0x00f0; 156 l |= (region.start >> 8) & 0x00f0;
159 l |= region.end & 0xf000; 157 l |= region.end & 0xf000;
160 /* Set up upper 16 bits of I/O base/limit. */ 158 /* Set up upper 16 bits of I/O base/limit. */
161 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); 159 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
162 dev_info(&bridge->dev, " IO window: %#04lx-%#04lx\n", 160 dev_info(&bridge->dev, " bridge window %pR\n", res);
163 (unsigned long)region.start,
164 (unsigned long)region.end);
165 } 161 }
166 else { 162 else {
167 /* Clear upper 16 bits of I/O base/limit. */ 163 /* Clear upper 16 bits of I/O base/limit. */
168 io_upper16 = 0; 164 io_upper16 = 0;
169 l = 0x00f0; 165 l = 0x00f0;
170 dev_info(&bridge->dev, " IO window: disabled\n"); 166 dev_info(&bridge->dev, " bridge window [io disabled]\n");
171 } 167 }
172 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ 168 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
173 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); 169 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
@@ -178,17 +174,16 @@ static void pci_setup_bridge(struct pci_bus *bus)
178 174
179 /* Set up the top and bottom of the PCI Memory segment 175 /* Set up the top and bottom of the PCI Memory segment
180 for this bus. */ 176 for this bus. */
181 pcibios_resource_to_bus(bridge, &region, bus->resource[1]); 177 res = bus->resource[1];
182 if (bus->resource[1]->flags & IORESOURCE_MEM) { 178 pcibios_resource_to_bus(bridge, &region, res);
179 if (res->flags & IORESOURCE_MEM) {
183 l = (region.start >> 16) & 0xfff0; 180 l = (region.start >> 16) & 0xfff0;
184 l |= region.end & 0xfff00000; 181 l |= region.end & 0xfff00000;
185 dev_info(&bridge->dev, " MEM window: %#08lx-%#08lx\n", 182 dev_info(&bridge->dev, " bridge window %pR\n", res);
186 (unsigned long)region.start,
187 (unsigned long)region.end);
188 } 183 }
189 else { 184 else {
190 l = 0x0000fff0; 185 l = 0x0000fff0;
191 dev_info(&bridge->dev, " MEM window: disabled\n"); 186 dev_info(&bridge->dev, " bridge window [mem disabled]\n");
192 } 187 }
193 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); 188 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
194 189
@@ -198,34 +193,27 @@ static void pci_setup_bridge(struct pci_bus *bus)
198 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); 193 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
199 194
200 /* Set up PREF base/limit. */ 195 /* Set up PREF base/limit. */
201 pref_mem64 = 0;
202 bu = lu = 0; 196 bu = lu = 0;
203 pcibios_resource_to_bus(bridge, &region, bus->resource[2]); 197 res = bus->resource[2];
204 if (bus->resource[2]->flags & IORESOURCE_PREFETCH) { 198 pcibios_resource_to_bus(bridge, &region, res);
205 int width = 8; 199 if (res->flags & IORESOURCE_PREFETCH) {
206 l = (region.start >> 16) & 0xfff0; 200 l = (region.start >> 16) & 0xfff0;
207 l |= region.end & 0xfff00000; 201 l |= region.end & 0xfff00000;
208 if (bus->resource[2]->flags & IORESOURCE_MEM_64) { 202 if (res->flags & IORESOURCE_MEM_64) {
209 pref_mem64 = 1;
210 bu = upper_32_bits(region.start); 203 bu = upper_32_bits(region.start);
211 lu = upper_32_bits(region.end); 204 lu = upper_32_bits(region.end);
212 width = 16;
213 } 205 }
214 dev_info(&bridge->dev, " PREFETCH window: %#0*llx-%#0*llx\n", 206 dev_info(&bridge->dev, " bridge window %pR\n", res);
215 width, (unsigned long long)region.start,
216 width, (unsigned long long)region.end);
217 } 207 }
218 else { 208 else {
219 l = 0x0000fff0; 209 l = 0x0000fff0;
220 dev_info(&bridge->dev, " PREFETCH window: disabled\n"); 210 dev_info(&bridge->dev, " bridge window [mem pref disabled]\n");
221 } 211 }
222 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); 212 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
223 213
224 if (pref_mem64) { 214 /* Set the upper 32 bits of PREF base & limit. */
225 /* Set the upper 32 bits of PREF base & limit. */ 215 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
226 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); 216 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
227 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
228 }
229 217
230 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); 218 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
231} 219}
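The register packing preserved above is easy to check in isolation: the I/O base/limit word carries bits 15:12 of each address in one byte pair, with the upper 16 bits of base and limit in a second register. A runnable sketch of the encoding for a sample window, mirroring the shifts in pci_setup_bridge():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0x1000, end = 0x2fff;	/* example I/O window */

	/* Bits 15:12 of the base land in byte 0 bits 7:4, bits 15:12 of
	 * the limit in byte 1 bits 7:4, as for PCI_IO_BASE/PCI_IO_LIMIT. */
	uint32_t l = ((start >> 8) & 0x00f0) | (end & 0xf000);
	/* Upper 16 bits of base and limit for 32-bit-capable I/O windows. */
	uint32_t io_upper16 = (end & 0xffff0000) | (start >> 16);

	printf("PCI_IO_BASE word %#06x, upper16 %#010x\n", l, io_upper16);
	return 0;
}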
@@ -345,6 +333,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size)
345#endif 333#endif
346 size = ALIGN(size + size1, 4096); 334 size = ALIGN(size + size1, 4096);
347 if (!size) { 335 if (!size) {
336 if (b_res->start || b_res->end)
337 dev_info(&bus->self->dev, "disabling bridge window "
338 "%pR to [bus %02x-%02x] (unused)\n", b_res,
339 bus->secondary, bus->subordinate);
348 b_res->flags = 0; 340 b_res->flags = 0;
349 return; 341 return;
350 } 342 }
@@ -390,8 +382,9 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
390 align = pci_resource_alignment(dev, r); 382 align = pci_resource_alignment(dev, r);
391 order = __ffs(align) - 20; 383 order = __ffs(align) - 20;
392 if (order > 11) { 384 if (order > 11) {
393 dev_warn(&dev->dev, "BAR %d bad alignment %llx: " 385 dev_warn(&dev->dev, "disabling BAR %d: %pR "
394 "%pR\n", i, (unsigned long long)align, r); 386 "(bad alignment %#llx)\n", i, r,
387 (unsigned long long) align);
395 r->flags = 0; 388 r->flags = 0;
396 continue; 389 continue;
397 } 390 }
@@ -425,6 +418,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
425 } 418 }
426 size = ALIGN(size, min_align); 419 size = ALIGN(size, min_align);
427 if (!size) { 420 if (!size) {
421 if (b_res->start || b_res->end)
422 dev_info(&bus->self->dev, "disabling bridge window "
423 "%pR to [bus %02x-%02x] (unused)\n", b_res,
424 bus->secondary, bus->subordinate);
428 b_res->flags = 0; 425 b_res->flags = 0;
429 return 1; 426 return 1;
430 } 427 }
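The alignment check in pbus_size_mem() above buckets resources by order: the log2 of a power-of-two alignment, rebased so that 1 MB is order 0, with anything above order 11 rejected as a bogus alignment. A runnable check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

/* Position of the single set bit in a power-of-two value,
 * playing the role of the kernel's __ffs() here. */
static int log2_u64(uint64_t x)
{
	int n = 0;
	while (x >>= 1)
		n++;
	return n;
}

int main(void)
{
	uint64_t aligns[] = { 1ULL << 20, 1ULL << 24, 1ULL << 32 };
	unsigned i;

	for (i = 0; i < 3; i++) {
		int order = log2_u64(aligns[i]) - 20;
		printf("align %#llx -> order %d%s\n",
		       (unsigned long long)aligns[i], order,
		       order > 11 ? " (rejected: bad alignment)" : "");
	}
	return 0;
}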
@@ -582,10 +579,7 @@ static void pci_bus_dump_res(struct pci_bus *bus)
582 if (!res || !res->end) 579 if (!res || !res->end)
583 continue; 580 continue;
584 581
585 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %s %pR\n", i, 582 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
586 (res->flags & IORESOURCE_IO) ? "io: " :
587 ((res->flags & IORESOURCE_PREFETCH)? "pref mem":"mem:"),
588 res);
589 } 583 }
590} 584}
591 585
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index c54526b206b5..7d678bb15ffb 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -51,12 +51,6 @@ void pci_update_resource(struct pci_dev *dev, int resno)
51 51
52 pcibios_resource_to_bus(dev, &region, res); 52 pcibios_resource_to_bus(dev, &region, res);
53 53
54 dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] "
55 "flags %#lx\n", resno, res,
56 (unsigned long long)region.start,
57 (unsigned long long)region.end,
58 (unsigned long)res->flags);
59
60 new = region.start | (res->flags & PCI_REGION_FLAG_MASK); 54 new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
61 if (res->flags & IORESOURCE_IO) 55 if (res->flags & IORESOURCE_IO)
62 mask = (u32)PCI_BASE_ADDRESS_IO_MASK; 56 mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
@@ -91,9 +85,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
91 } 85 }
92 } 86 }
93 res->flags &= ~IORESOURCE_UNSET; 87 res->flags &= ~IORESOURCE_UNSET;
 94 dev_dbg(&dev->dev, "BAR %d: moved to bus [%#llx-%#llx] flags %#lx\n", 88 dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
95 resno, (unsigned long long)region.start, 89 resno, res, (unsigned long long)region.start,
96 (unsigned long long)region.end, res->flags); 90 (unsigned long long)region.end);
97} 91}
98 92
99int pci_claim_resource(struct pci_dev *dev, int resource) 93int pci_claim_resource(struct pci_dev *dev, int resource)
@@ -103,20 +97,17 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
103 int err; 97 int err;
104 98
105 root = pci_find_parent_resource(dev, res); 99 root = pci_find_parent_resource(dev, res);
106 100 if (!root) {
107 err = -EINVAL; 101 dev_err(&dev->dev, "no compatible bridge window for %pR\n",
108 if (root != NULL) 102 res);
109 err = request_resource(root, res); 103 return -EINVAL;
110
111 if (err) {
112 const char *dtype = resource < PCI_BRIDGE_RESOURCES ? "device" : "bridge";
113 dev_err(&dev->dev, "BAR %d: %s of %s %pR\n",
114 resource,
115 root ? "address space collision on" :
116 "no parent found for",
117 dtype, res);
118 } 104 }
119 105
106 err = request_resource(root, res);
107 if (err)
108 dev_err(&dev->dev,
109 "address space collision: %pR already in use\n", res);
110
120 return err; 111 return err;
121} 112}
122EXPORT_SYMBOL(pci_claim_resource); 113EXPORT_SYMBOL(pci_claim_resource);
@@ -124,7 +115,7 @@ EXPORT_SYMBOL(pci_claim_resource);
124#ifdef CONFIG_PCI_QUIRKS 115#ifdef CONFIG_PCI_QUIRKS
125void pci_disable_bridge_window(struct pci_dev *dev) 116void pci_disable_bridge_window(struct pci_dev *dev)
126{ 117{
127 dev_dbg(&dev->dev, "Disabling bridge window.\n"); 118 dev_info(&dev->dev, "disabling bridge mem windows\n");
128 119
129 /* MMIO Base/Limit */ 120 /* MMIO Base/Limit */
130 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0); 121 pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0);
@@ -165,6 +156,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
165 156
166 if (!ret) { 157 if (!ret) {
167 res->flags &= ~IORESOURCE_STARTALIGN; 158 res->flags &= ~IORESOURCE_STARTALIGN;
159 dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
168 if (resno < PCI_BRIDGE_RESOURCES) 160 if (resno < PCI_BRIDGE_RESOURCES)
169 pci_update_resource(dev, resno); 161 pci_update_resource(dev, resno);
170 } 162 }
@@ -178,12 +170,12 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
178 resource_size_t align; 170 resource_size_t align;
179 struct pci_bus *bus; 171 struct pci_bus *bus;
180 int ret; 172 int ret;
173 char *type;
181 174
182 align = pci_resource_alignment(dev, res); 175 align = pci_resource_alignment(dev, res);
183 if (!align) { 176 if (!align) {
184 dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " 177 dev_info(&dev->dev, "BAR %d: can't assign %pR "
185 "alignment) %pR flags %#lx\n", 178 "(bogus alignment)\n", resno, res);
186 resno, res, res->flags);
187 return -EINVAL; 179 return -EINVAL;
188 } 180 }
189 181
@@ -198,9 +190,20 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
198 break; 190 break;
199 } 191 }
200 192
201 if (ret) 193 if (ret) {
202 dev_info(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", 194 if (res->flags & IORESOURCE_MEM)
203 resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); 195 if (res->flags & IORESOURCE_PREFETCH)
196 type = "mem pref";
197 else
198 type = "mem";
199 else if (res->flags & IORESOURCE_IO)
200 type = "io";
201 else
202 type = "unknown";
203 dev_info(&dev->dev,
204 "BAR %d: can't assign %s (size %#llx)\n",
205 resno, type, (unsigned long long) resource_size(res));
206 }
204 207
205 return ret; 208 return ret;
206} 209}
@@ -225,9 +228,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head)
225 228
226 r_align = pci_resource_alignment(dev, r); 229 r_align = pci_resource_alignment(dev, r);
227 if (!r_align) { 230 if (!r_align) {
228 dev_warn(&dev->dev, "BAR %d: bogus alignment " 231 dev_warn(&dev->dev, "BAR %d: %pR has bogus alignment\n",
229 "%pR flags %#lx\n", 232 i, r);
230 i, r, r->flags);
231 continue; 233 continue;
232 } 234 }
233 for (list = head; ; list = list->next) { 235 for (list = head; ; list = list->next) {
@@ -274,8 +276,8 @@ int pci_enable_resources(struct pci_dev *dev, int mask)
274 continue; 276 continue;
275 277
276 if (!r->parent) { 278 if (!r->parent) {
277 dev_err(&dev->dev, "device not available because of " 279 dev_err(&dev->dev, "device not available "
278 "BAR %d %pR collisions\n", i, r); 280 "(can't reserve %pR)\n", r);
279 return -EINVAL; 281 return -EINVAL;
280 } 282 }
281 283
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 4cd70d056810..a73b040ddbfb 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -184,26 +184,33 @@ fail:
184 184
185=====================================================================*/ 185=====================================================================*/
186 186
187/* 187static void cardbus_config_irq_and_cls(struct pci_bus *bus, int irq)
188 * Since there is only one interrupt available to CardBus
189 * devices, all devices downstream of this device must
190 * be using this IRQ.
191 */
192static void cardbus_assign_irqs(struct pci_bus *bus, int irq)
193{ 188{
194 struct pci_dev *dev; 189 struct pci_dev *dev;
195 190
196 list_for_each_entry(dev, &bus->devices, bus_list) { 191 list_for_each_entry(dev, &bus->devices, bus_list) {
197 u8 irq_pin; 192 u8 irq_pin;
198 193
194 /*
195 * Since there is only one interrupt available to
196 * CardBus devices, all devices downstream of this
197 * device must be using this IRQ.
198 */
199 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin); 199 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq_pin);
200 if (irq_pin) { 200 if (irq_pin) {
201 dev->irq = irq; 201 dev->irq = irq;
202 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); 202 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
203 } 203 }
204 204
205 /*
206 * Some controllers transfer very slowly with 0 CLS.
207 * Configure it. This may fail as CLS configuration
208 * is mandatory only for MWI.
209 */
210 pci_set_cacheline_size(dev);
211
205 if (dev->subordinate) 212 if (dev->subordinate)
206 cardbus_assign_irqs(dev->subordinate, irq); 213 cardbus_config_irq_and_cls(dev->subordinate, irq);
207 } 214 }
208} 215}
209 216
@@ -228,7 +235,7 @@ int __ref cb_alloc(struct pcmcia_socket * s)
228 */ 235 */
229 pci_bus_size_bridges(bus); 236 pci_bus_size_bridges(bus);
230 pci_bus_assign_resources(bus); 237 pci_bus_assign_resources(bus);
231 cardbus_assign_irqs(bus, s->pci_irq); 238 cardbus_config_irq_and_cls(bus, s->pci_irq);
232 239
233 /* socket specific tune function */ 240 /* socket specific tune function */
234 if (s->tune_bridge) 241 if (s->tune_bridge)
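Because the socket routes exactly one interrupt, cardbus_config_irq_and_cls() walks every subordinate bus recursively so each downstream function inherits the same IRQ (and gets its cache line size programmed along the way). A reduced, runnable model of that walk; the tree types are stand-ins for the kernel structures:

#include <stdio.h>
#include <stddef.h>

struct bus_stub;
struct dev_stub { int irq; struct bus_stub *subordinate; };
struct bus_stub { struct dev_stub *devs[4]; };

/* Give every device below a bus the same IRQ, recursing into bridges,
 * mirroring the shape of cardbus_config_irq_and_cls(). */
static void assign_irqs(struct bus_stub *bus, int irq)
{
	for (size_t i = 0; i < 4 && bus->devs[i]; i++) {
		bus->devs[i]->irq = irq;
		if (bus->devs[i]->subordinate)
			assign_irqs(bus->devs[i]->subordinate, irq);
	}
}

int main(void)
{
	struct dev_stub leaf = { 0, NULL };
	struct bus_stub child = { { &leaf } };
	struct dev_stub bridge = { 0, &child };
	struct bus_stub root = { { &bridge } };

	assign_irqs(&root, 11);
	printf("leaf irq = %d\n", leaf.irq);
	return 0;
}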
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index 8473fe5ed7ff..dfbd5a6cc58b 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -285,15 +285,10 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
285 * the PCI region, and that might prevent a PCI 285 * the PCI region, and that might prevent a PCI
286 * driver from requesting its resources. 286 * driver from requesting its resources.
287 */ 287 */
288 dev_warn(&dev->dev, "%s resource " 288 dev_warn(&dev->dev,
289 "(0x%llx-0x%llx) overlaps %s BAR %d " 289 "disabling %pR because it overlaps "
290 "(0x%llx-0x%llx), disabling\n", 290 "%s BAR %d %pR\n", res,
291 pnp_resource_type_name(res), 291 pci_name(pdev), i, &pdev->resource[i]);
292 (unsigned long long) pnp_start,
293 (unsigned long long) pnp_end,
294 pci_name(pdev), i,
295 (unsigned long long) pci_start,
296 (unsigned long long) pci_end);
297 res->flags |= IORESOURCE_DISABLED; 292 res->flags |= IORESOURCE_DISABLED;
298 } 293 }
299 } 294 }
diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
index ba9765427886..64d0596bafb5 100644
--- a/drivers/pnp/resource.c
+++ b/drivers/pnp/resource.c
@@ -517,7 +517,7 @@ struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
517 res->start = irq; 517 res->start = irq;
518 res->end = irq; 518 res->end = irq;
519 519
520 pnp_dbg(&dev->dev, " add irq %d flags %#x\n", irq, flags); 520 pnp_dbg(&dev->dev, " add %pr\n", res);
521 return pnp_res; 521 return pnp_res;
522} 522}
523 523
@@ -538,7 +538,7 @@ struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
538 res->start = dma; 538 res->start = dma;
539 res->end = dma; 539 res->end = dma;
540 540
541 pnp_dbg(&dev->dev, " add dma %d flags %#x\n", dma, flags); 541 pnp_dbg(&dev->dev, " add %pr\n", res);
542 return pnp_res; 542 return pnp_res;
543} 543}
544 544
@@ -562,8 +562,7 @@ struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
562 res->start = start; 562 res->start = start;
563 res->end = end; 563 res->end = end;
564 564
565 pnp_dbg(&dev->dev, " add io %#llx-%#llx flags %#x\n", 565 pnp_dbg(&dev->dev, " add %pr\n", res);
566 (unsigned long long) start, (unsigned long long) end, flags);
567 return pnp_res; 566 return pnp_res;
568} 567}
569 568
@@ -587,8 +586,7 @@ struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
587 res->start = start; 586 res->start = start;
588 res->end = end; 587 res->end = end;
589 588
590 pnp_dbg(&dev->dev, " add mem %#llx-%#llx flags %#x\n", 589 pnp_dbg(&dev->dev, " add %pr\n", res);
591 (unsigned long long) start, (unsigned long long) end, flags);
592 return pnp_res; 590 return pnp_res;
593} 591}
594 592
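Most of the churn in this series replaces hand-rolled range formatting with the %pR and %pr printk extensions, which render a struct resource (type, range, and for %pr the raw flags) directly from a pointer. A hedged sketch (kernel context assumed; dev, res, and pnp_res stand for the usual locals):

/* %pR decodes the resource, e.g. "[mem 0xfebf0000-0xfebf3fff]";
 * %pr prints it raw with flags. Callers pass &res, not start/end casts. */
dev_info(&dev->dev, "window %pR\n", res);
pnp_dbg(&dev->dev, " add %pr\n", &pnp_res->res);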
diff --git a/drivers/pnp/support.c b/drivers/pnp/support.c
index 63087d5ce609..9585c1c1cc36 100644
--- a/drivers/pnp/support.c
+++ b/drivers/pnp/support.c
@@ -75,47 +75,14 @@ char *pnp_resource_type_name(struct resource *res)
75 75
76void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc) 76void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
77{ 77{
78 char buf[128];
79 int len;
80 struct pnp_resource *pnp_res; 78 struct pnp_resource *pnp_res;
81 struct resource *res;
82 79
83 if (list_empty(&dev->resources)) { 80 if (list_empty(&dev->resources))
84 pnp_dbg(&dev->dev, "%s: no current resources\n", desc); 81 pnp_dbg(&dev->dev, "%s: no current resources\n", desc);
85 return; 82 else {
86 } 83 pnp_dbg(&dev->dev, "%s: current resources:\n", desc);
87 84 list_for_each_entry(pnp_res, &dev->resources, list)
88 pnp_dbg(&dev->dev, "%s: current resources:\n", desc); 85 pnp_dbg(&dev->dev, "%pr\n", &pnp_res->res);
89 list_for_each_entry(pnp_res, &dev->resources, list) {
90 res = &pnp_res->res;
91 len = 0;
92
93 len += scnprintf(buf + len, sizeof(buf) - len, " %-3s ",
94 pnp_resource_type_name(res));
95
96 if (res->flags & IORESOURCE_DISABLED) {
97 pnp_dbg(&dev->dev, "%sdisabled\n", buf);
98 continue;
99 }
100
101 switch (pnp_resource_type(res)) {
102 case IORESOURCE_IO:
103 case IORESOURCE_MEM:
104 len += scnprintf(buf + len, sizeof(buf) - len,
105 "%#llx-%#llx flags %#lx",
106 (unsigned long long) res->start,
107 (unsigned long long) res->end,
108 res->flags);
109 break;
110 case IORESOURCE_IRQ:
111 case IORESOURCE_DMA:
112 len += scnprintf(buf + len, sizeof(buf) - len,
113 "%lld flags %#lx",
114 (unsigned long long) res->start,
115 res->flags);
116 break;
117 }
118 pnp_dbg(&dev->dev, "%s\n", buf);
119 } 86 }
120} 87}
121 88
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 59b90922da8c..49c1720df59a 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -22,11 +22,11 @@ static const struct pnp_device_id pnp_dev_table[] = {
22 {"", 0} 22 {"", 0}
23}; 23};
24 24
25static void reserve_range(struct pnp_dev *dev, resource_size_t start, 25static void reserve_range(struct pnp_dev *dev, struct resource *r, int port)
26 resource_size_t end, int port)
27{ 26{
28 char *regionid; 27 char *regionid;
29 const char *pnpid = dev_name(&dev->dev); 28 const char *pnpid = dev_name(&dev->dev);
29 resource_size_t start = r->start, end = r->end;
30 struct resource *res; 30 struct resource *res;
31 31
32 regionid = kmalloc(16, GFP_KERNEL); 32 regionid = kmalloc(16, GFP_KERNEL);
@@ -48,10 +48,8 @@ static void reserve_range(struct pnp_dev *dev, resource_size_t start,
48 * example do reserve stuff they know about too, so we may well 48 * example do reserve stuff they know about too, so we may well
49 * have double reservations. 49 * have double reservations.
50 */ 50 */
51 dev_info(&dev->dev, "%s range 0x%llx-0x%llx %s reserved\n", 51 dev_info(&dev->dev, "%pR %s reserved\n", r,
52 port ? "ioport" : "iomem", 52 res ? "has been" : "could not be");
53 (unsigned long long) start, (unsigned long long) end,
54 res ? "has been" : "could not be");
55} 53}
56 54
57static void reserve_resources_of_dev(struct pnp_dev *dev) 55static void reserve_resources_of_dev(struct pnp_dev *dev)
@@ -77,14 +75,14 @@ static void reserve_resources_of_dev(struct pnp_dev *dev)
77 if (res->end < res->start) 75 if (res->end < res->start)
78 continue; /* invalid */ 76 continue; /* invalid */
79 77
80 reserve_range(dev, res->start, res->end, 1); 78 reserve_range(dev, res, 1);
81 } 79 }
82 80
83 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) { 81 for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
84 if (res->flags & IORESOURCE_DISABLED) 82 if (res->flags & IORESOURCE_DISABLED)
85 continue; 83 continue;
86 84
87 reserve_range(dev, res->start, res->end, 0); 85 reserve_range(dev, res, 0);
88 } 86 }
89} 87}
90 88
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dfcd75cf4907..80e71fce1850 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -103,6 +103,8 @@ source "drivers/staging/line6/Kconfig"
103 103
104source "drivers/gpu/drm/radeon/Kconfig" 104source "drivers/gpu/drm/radeon/Kconfig"
105 105
106source "drivers/gpu/drm/nouveau/Kconfig"
107
106source "drivers/staging/octeon/Kconfig" 108source "drivers/staging/octeon/Kconfig"
107 109
108source "drivers/staging/serqt_usb2/Kconfig" 110source "drivers/staging/serqt_usb2/Kconfig"
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 240750881d28..81aac7f4ca59 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -60,6 +60,8 @@ config USB_ARCH_HAS_EHCI
60 default y if ARCH_IXP4XX 60 default y if ARCH_IXP4XX
61 default y if ARCH_W90X900 61 default y if ARCH_W90X900
62 default y if ARCH_AT91SAM9G45 62 default y if ARCH_AT91SAM9G45
63 default y if ARCH_MXC
64 default y if ARCH_OMAP34XX
63 default PCI 65 default PCI
64 66
65# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface. 67# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b80bc9f..473aa1a20de9 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -44,3 +44,5 @@ obj-y += early/
44 44
45obj-$(CONFIG_USB_ATM) += atm/ 45obj-$(CONFIG_USB_ATM) += atm/
46obj-$(CONFIG_USB_SPEEDTOUCH) += atm/ 46obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
47
48obj-$(CONFIG_USB_ULPI) += otg/
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e4eca7810bcf..34d4eb98829e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1461,6 +1461,12 @@ err_out:
1461} 1461}
1462 1462
1463#endif /* CONFIG_PM */ 1463#endif /* CONFIG_PM */
1464
1465#define NOKIA_PCSUITE_ACM_INFO(x) \
1466 USB_DEVICE_AND_INTERFACE_INFO(0x0421, x, \
1467 USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, \
1468 USB_CDC_ACM_PROTO_VENDOR)
1469
1464/* 1470/*
1465 * USB driver structure. 1471 * USB driver structure.
1466 */ 1472 */
@@ -1519,6 +1525,57 @@ static struct usb_device_id acm_ids[] = {
1519 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ 1525 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1520 }, 1526 },
1521 1527
1528 /* Nokia S60 phones expose two ACM channels. The first is
1529 * a modem and is picked up by the standard AT-command
1530 * information below. The second is 'vendor-specific' but
1531 * is treated as a serial device at the S60 end, so we want
1532 * to expose it on Linux too. */
1533 { NOKIA_PCSUITE_ACM_INFO(0x042D), }, /* Nokia 3250 */
1534 { NOKIA_PCSUITE_ACM_INFO(0x04D8), }, /* Nokia 5500 Sport */
1535 { NOKIA_PCSUITE_ACM_INFO(0x04C9), }, /* Nokia E50 */
1536 { NOKIA_PCSUITE_ACM_INFO(0x0419), }, /* Nokia E60 */
1537 { NOKIA_PCSUITE_ACM_INFO(0x044D), }, /* Nokia E61 */
1538 { NOKIA_PCSUITE_ACM_INFO(0x0001), }, /* Nokia E61i */
1539 { NOKIA_PCSUITE_ACM_INFO(0x0475), }, /* Nokia E62 */
1540 { NOKIA_PCSUITE_ACM_INFO(0x0508), }, /* Nokia E65 */
1541 { NOKIA_PCSUITE_ACM_INFO(0x0418), }, /* Nokia E70 */
1542 { NOKIA_PCSUITE_ACM_INFO(0x0425), }, /* Nokia N71 */
1543 { NOKIA_PCSUITE_ACM_INFO(0x0486), }, /* Nokia N73 */
1544 { NOKIA_PCSUITE_ACM_INFO(0x04DF), }, /* Nokia N75 */
1545 { NOKIA_PCSUITE_ACM_INFO(0x000e), }, /* Nokia N77 */
1546 { NOKIA_PCSUITE_ACM_INFO(0x0445), }, /* Nokia N80 */
1547 { NOKIA_PCSUITE_ACM_INFO(0x042F), }, /* Nokia N91 & N91 8GB */
1548 { NOKIA_PCSUITE_ACM_INFO(0x048E), }, /* Nokia N92 */
1549 { NOKIA_PCSUITE_ACM_INFO(0x0420), }, /* Nokia N93 */
1550 { NOKIA_PCSUITE_ACM_INFO(0x04E6), }, /* Nokia N93i */
1551 { NOKIA_PCSUITE_ACM_INFO(0x04B2), }, /* Nokia 5700 XpressMusic */
1552 { NOKIA_PCSUITE_ACM_INFO(0x0134), }, /* Nokia 6110 Navigator (China) */
1553 { NOKIA_PCSUITE_ACM_INFO(0x046E), }, /* Nokia 6110 Navigator */
1554 { NOKIA_PCSUITE_ACM_INFO(0x002f), }, /* Nokia 6120 classic & */
1555 { NOKIA_PCSUITE_ACM_INFO(0x0088), }, /* Nokia 6121 classic */
1556 { NOKIA_PCSUITE_ACM_INFO(0x00fc), }, /* Nokia 6124 classic */
1557 { NOKIA_PCSUITE_ACM_INFO(0x0042), }, /* Nokia E51 */
1558 { NOKIA_PCSUITE_ACM_INFO(0x00b0), }, /* Nokia E66 */
1559 { NOKIA_PCSUITE_ACM_INFO(0x00ab), }, /* Nokia E71 */
1560 { NOKIA_PCSUITE_ACM_INFO(0x0481), }, /* Nokia N76 */
1561 { NOKIA_PCSUITE_ACM_INFO(0x0007), }, /* Nokia N81 & N81 8GB */
1562 { NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
1563 { NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
1564 { NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */
1565 { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
1566 { NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
1567 { NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
1568 { NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
1569 { NOKIA_PCSUITE_ACM_INFO(0x00a0), }, /* Nokia 6650 */
1570 { NOKIA_PCSUITE_ACM_INFO(0x007b), }, /* Nokia N78 */
1571 { NOKIA_PCSUITE_ACM_INFO(0x0094), }, /* Nokia N85 */
1572 { NOKIA_PCSUITE_ACM_INFO(0x003a), }, /* Nokia N96 & N96-3 */
1573 { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
1574 { NOKIA_PCSUITE_ACM_INFO(0x0108), }, /* Nokia 5320 XpressMusic 2G */
1575 { NOKIA_PCSUITE_ACM_INFO(0x01f5), }, /* Nokia N97, RM-505 */
1576
1577 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
1578
1522 /* control interfaces with various AT-command sets */ 1579 /* control interfaces with various AT-command sets */
1523 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1580 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1524 USB_CDC_ACM_PROTO_AT_V25TER) }, 1581 USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1533,7 +1590,6 @@ static struct usb_device_id acm_ids[] = {
1533 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, 1590 { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
1534 USB_CDC_ACM_PROTO_AT_CDMA) }, 1591 USB_CDC_ACM_PROTO_AT_CDMA) },
1535 1592
1536 /* NOTE: COMM/ACM/0xff is likely MSFT RNDIS ... NOT a modem!! */
1537 { } 1593 { }
1538}; 1594};
1539 1595
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index b4bd2411c666..7c5f4e32c920 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -347,13 +347,8 @@ usbtmc_abort_bulk_out_check_status:
347 goto exit; 347 goto exit;
348 348
349usbtmc_abort_bulk_out_clear_halt: 349usbtmc_abort_bulk_out_clear_halt:
350 rv = usb_control_msg(data->usb_dev, 350 rv = usb_clear_halt(data->usb_dev,
351 usb_sndctrlpipe(data->usb_dev, 0), 351 usb_sndbulkpipe(data->usb_dev, data->bulk_out));
352 USB_REQ_CLEAR_FEATURE,
353 USB_DIR_OUT | USB_TYPE_STANDARD |
354 USB_RECIP_ENDPOINT,
355 USB_ENDPOINT_HALT, data->bulk_out, buffer,
356 0, USBTMC_TIMEOUT);
357 352
358 if (rv < 0) { 353 if (rv < 0) {
359 dev_err(dev, "usb_control_msg returned %d\n", rv); 354 dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -562,10 +557,16 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
562 n_bytes = roundup(12 + this_part, 4); 557 n_bytes = roundup(12 + this_part, 4);
563 memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part)); 558 memset(buffer + 12 + this_part, 0, n_bytes - (12 + this_part));
564 559
565 retval = usb_bulk_msg(data->usb_dev, 560 do {
566 usb_sndbulkpipe(data->usb_dev, 561 retval = usb_bulk_msg(data->usb_dev,
567 data->bulk_out), 562 usb_sndbulkpipe(data->usb_dev,
568 buffer, n_bytes, &actual, USBTMC_TIMEOUT); 563 data->bulk_out),
564 buffer, n_bytes,
565 &actual, USBTMC_TIMEOUT);
566 if (retval != 0)
567 break;
568 n_bytes -= actual;
569 } while (n_bytes);
569 570
570 data->bTag_last_write = data->bTag; 571 data->bTag_last_write = data->bTag;
571 data->bTag++; 572 data->bTag++;
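The new do/while above keeps issuing bulk transfers until the whole chunk is consumed. The general short-write idiom looks like the runnable userspace analogue below, built on write(2); note the analogue also advances the buffer pointer past the bytes already accepted:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Keep writing until all bytes are accepted or an error occurs. */
static int write_all(int fd, const char *buf, size_t len)
{
	while (len) {
		ssize_t n = write(fd, buf, len);
		if (n <= 0)
			return -1;	/* treat 0 as failure to avoid spinning */
		buf += n;		/* advance past what was accepted */
		len -= n;
	}
	return 0;
}

int main(void)
{
	const char msg[] = "hello\n";
	return write_all(STDOUT_FILENO, msg, strlen(msg)) ? 1 : 0;
}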
@@ -702,14 +703,8 @@ usbtmc_clear_check_status:
702 703
703usbtmc_clear_bulk_out_halt: 704usbtmc_clear_bulk_out_halt:
704 705
705 rv = usb_control_msg(data->usb_dev, 706 rv = usb_clear_halt(data->usb_dev,
706 usb_sndctrlpipe(data->usb_dev, 0), 707 usb_sndbulkpipe(data->usb_dev, data->bulk_out));
707 USB_REQ_CLEAR_FEATURE,
708 USB_DIR_OUT | USB_TYPE_STANDARD |
709 USB_RECIP_ENDPOINT,
710 USB_ENDPOINT_HALT,
711 data->bulk_out, buffer, 0,
712 USBTMC_TIMEOUT);
713 if (rv < 0) { 708 if (rv < 0) {
714 dev_err(dev, "usb_control_msg returned %d\n", rv); 709 dev_err(dev, "usb_control_msg returned %d\n", rv);
715 goto exit; 710 goto exit;
@@ -730,13 +725,8 @@ static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
730 if (!buffer) 725 if (!buffer)
731 return -ENOMEM; 726 return -ENOMEM;
732 727
733 rv = usb_control_msg(data->usb_dev, 728 rv = usb_clear_halt(data->usb_dev,
734 usb_sndctrlpipe(data->usb_dev, 0), 729 usb_sndbulkpipe(data->usb_dev, data->bulk_out));
735 USB_REQ_CLEAR_FEATURE,
736 USB_DIR_OUT | USB_TYPE_STANDARD |
737 USB_RECIP_ENDPOINT,
738 USB_ENDPOINT_HALT, data->bulk_out,
739 buffer, 0, USBTMC_TIMEOUT);
740 730
741 if (rv < 0) { 731 if (rv < 0) {
742 dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n", 732 dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
@@ -759,12 +749,8 @@ static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
759 if (!buffer) 749 if (!buffer)
760 return -ENOMEM; 750 return -ENOMEM;
761 751
762 rv = usb_control_msg(data->usb_dev, usb_sndctrlpipe(data->usb_dev, 0), 752 rv = usb_clear_halt(data->usb_dev,
763 USB_REQ_CLEAR_FEATURE, 753 usb_rcvbulkpipe(data->usb_dev, data->bulk_in));
764 USB_DIR_OUT | USB_TYPE_STANDARD |
765 USB_RECIP_ENDPOINT,
766 USB_ENDPOINT_HALT, data->bulk_in, buffer, 0,
767 USBTMC_TIMEOUT);
768 754
769 if (rv < 0) { 755 if (rv < 0) {
770 dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n", 756 dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
@@ -1109,13 +1095,13 @@ static void usbtmc_disconnect(struct usb_interface *intf)
1109 kref_put(&data->kref, usbtmc_delete); 1095 kref_put(&data->kref, usbtmc_delete);
1110} 1096}
1111 1097
1112static int usbtmc_suspend (struct usb_interface *intf, pm_message_t message) 1098static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
1113{ 1099{
1114 /* this driver does not have pending URBs */ 1100 /* this driver does not have pending URBs */
1115 return 0; 1101 return 0;
1116} 1102}
1117 1103
1118static int usbtmc_resume (struct usb_interface *intf) 1104static int usbtmc_resume(struct usb_interface *intf)
1119{ 1105{
1120 return 0; 1106 return 0;
1121} 1107}
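Each conversion above collapses a hand-built CLEAR_FEATURE(ENDPOINT_HALT) control transfer into usb_clear_halt(), which issues the same request and additionally resets the host-side data toggle. A hedged before/after sketch (kernel context assumed; udev, ep_addr, and rv stand for the target device, bulk endpoint address, and return variable):

/* Before: manual control message targeting the endpoint. */
rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
		     USB_REQ_CLEAR_FEATURE,
		     USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT,
		     USB_ENDPOINT_HALT, ep_addr, NULL, 0, 1000);

/* After: one helper that also resets the host-side data toggle. */
rv = usb_clear_halt(udev, usb_sndbulkpipe(udev, ep_addr));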
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 4f864472c5c4..60a45f1e3a67 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -83,6 +83,47 @@ static ssize_t store_new_id(struct device_driver *driver,
83} 83}
84static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 84static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
85 85
86/**
87 * store_remove_id - remove a USB device ID from this driver
88 * @driver: target device driver
89 * @buf: buffer for scanning device ID data
90 * @count: input size
91 *
92 * Removes a dynamic usb device ID from this driver.
93 */
94static ssize_t
95store_remove_id(struct device_driver *driver, const char *buf, size_t count)
96{
97 struct usb_dynid *dynid, *n;
98 struct usb_driver *usb_driver = to_usb_driver(driver);
99 u32 idVendor = 0;
100 u32 idProduct = 0;
101 int fields = 0;
102 int retval = 0;
103
104 fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
105 if (fields < 2)
106 return -EINVAL;
107
108 spin_lock(&usb_driver->dynids.lock);
109 list_for_each_entry_safe(dynid, n, &usb_driver->dynids.list, node) {
110 struct usb_device_id *id = &dynid->id;
111 if ((id->idVendor == idVendor) &&
112 (id->idProduct == idProduct)) {
113 list_del(&dynid->node);
114 kfree(dynid);
115 retval = 0;
116 break;
117 }
118 }
119 spin_unlock(&usb_driver->dynids.lock);
120
121 if (retval)
122 return retval;
123 return count;
124}
125static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);
126
86static int usb_create_newid_file(struct usb_driver *usb_drv) 127static int usb_create_newid_file(struct usb_driver *usb_drv)
87{ 128{
88 int error = 0; 129 int error = 0;
@@ -107,6 +148,21 @@ static void usb_remove_newid_file(struct usb_driver *usb_drv)
107 &driver_attr_new_id); 148 &driver_attr_new_id);
108} 149}
109 150
151static int
152usb_create_removeid_file(struct usb_driver *drv)
153{
154 int error = 0;
155 if (drv->probe != NULL)
156 error = driver_create_file(&drv->drvwrap.driver,
157 &driver_attr_remove_id);
158 return error;
159}
160
161static void usb_remove_removeid_file(struct usb_driver *drv)
162{
163 driver_remove_file(&drv->drvwrap.driver, &driver_attr_remove_id);
164}
165
110static void usb_free_dynids(struct usb_driver *usb_drv) 166static void usb_free_dynids(struct usb_driver *usb_drv)
111{ 167{
112 struct usb_dynid *dynid, *n; 168 struct usb_dynid *dynid, *n;
@@ -128,6 +184,16 @@ static void usb_remove_newid_file(struct usb_driver *usb_drv)
128{ 184{
129} 185}
130 186
187static int
188usb_create_removeid_file(struct usb_driver *drv)
189{
190 return 0;
191}
192
193static void usb_remove_removeid_file(struct usb_driver *drv)
194{
195}
196
131static inline void usb_free_dynids(struct usb_driver *usb_drv) 197static inline void usb_free_dynids(struct usb_driver *usb_drv)
132{ 198{
133} 199}
@@ -774,19 +840,34 @@ int usb_register_driver(struct usb_driver *new_driver, struct module *owner,
774 INIT_LIST_HEAD(&new_driver->dynids.list); 840 INIT_LIST_HEAD(&new_driver->dynids.list);
775 841
776 retval = driver_register(&new_driver->drvwrap.driver); 842 retval = driver_register(&new_driver->drvwrap.driver);
843 if (retval)
844 goto out;
777 845
778 if (!retval) { 846 usbfs_update_special();
779 pr_info("%s: registered new interface driver %s\n", 847
848 retval = usb_create_newid_file(new_driver);
849 if (retval)
850 goto out_newid;
851
852 retval = usb_create_removeid_file(new_driver);
853 if (retval)
854 goto out_removeid;
855
856 pr_info("%s: registered new interface driver %s\n",
780 usbcore_name, new_driver->name); 857 usbcore_name, new_driver->name);
781 usbfs_update_special();
782 usb_create_newid_file(new_driver);
783 } else {
784 printk(KERN_ERR "%s: error %d registering interface "
785 " driver %s\n",
786 usbcore_name, retval, new_driver->name);
787 }
788 858
859out:
789 return retval; 860 return retval;
861
862out_removeid:
863 usb_remove_newid_file(new_driver);
864out_newid:
865 driver_unregister(&new_driver->drvwrap.driver);
866
 867 printk(KERN_ERR "%s: error %d registering interface "
 868 "driver %s\n",
869 usbcore_name, retval, new_driver->name);
870 goto out;
790} 871}
791EXPORT_SYMBOL_GPL(usb_register_driver); 872EXPORT_SYMBOL_GPL(usb_register_driver);
792 873
@@ -806,6 +887,7 @@ void usb_deregister(struct usb_driver *driver)
806 pr_info("%s: deregistering interface driver %s\n", 887 pr_info("%s: deregistering interface driver %s\n",
807 usbcore_name, driver->name); 888 usbcore_name, driver->name);
808 889
890 usb_remove_removeid_file(driver);
809 usb_remove_newid_file(driver); 891 usb_remove_newid_file(driver);
810 usb_free_dynids(driver); 892 usb_free_dynids(driver);
811 driver_unregister(&driver->drvwrap.driver); 893 driver_unregister(&driver->drvwrap.driver);
@@ -948,8 +1030,6 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
948 1030
949 done: 1031 done:
950 dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status); 1032 dev_vdbg(&udev->dev, "%s: status %d\n", __func__, status);
951 if (status == 0)
952 udev->autoresume_disabled = 0;
953 return status; 1033 return status;
954} 1034}
955 1035
@@ -1280,11 +1360,6 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
1280 1360
1281 /* Propagate the resume up the tree, if necessary */ 1361 /* Propagate the resume up the tree, if necessary */
1282 if (udev->state == USB_STATE_SUSPENDED) { 1362 if (udev->state == USB_STATE_SUSPENDED) {
1283 if ((msg.event & PM_EVENT_AUTO) &&
1284 udev->autoresume_disabled) {
1285 status = -EPERM;
1286 goto done;
1287 }
1288 if (parent) { 1363 if (parent) {
1289 status = usb_autoresume_device(parent); 1364 status = usb_autoresume_device(parent);
1290 if (status == 0) { 1365 if (status == 0) {
@@ -1341,7 +1416,6 @@ static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
1341 int status = 0; 1416 int status = 0;
1342 1417
1343 usb_pm_lock(udev); 1418 usb_pm_lock(udev);
1344 udev->auto_pm = 1;
1345 udev->pm_usage_cnt += inc_usage_cnt; 1419 udev->pm_usage_cnt += inc_usage_cnt;
1346 WARN_ON(udev->pm_usage_cnt < 0); 1420 WARN_ON(udev->pm_usage_cnt < 0);
1347 if (inc_usage_cnt) 1421 if (inc_usage_cnt)
@@ -1473,7 +1547,6 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
1473 if (intf->condition == USB_INTERFACE_UNBOUND) 1547 if (intf->condition == USB_INTERFACE_UNBOUND)
1474 status = -ENODEV; 1548 status = -ENODEV;
1475 else { 1549 else {
1476 udev->auto_pm = 1;
1477 atomic_add(inc_usage_cnt, &intf->pm_usage_cnt); 1550 atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
1478 udev->last_busy = jiffies; 1551 udev->last_busy = jiffies;
1479 if (inc_usage_cnt >= 0 && 1552 if (inc_usage_cnt >= 0 &&
@@ -1640,8 +1713,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
1640 1713
1641 if (intf->condition == USB_INTERFACE_UNBOUND) 1714 if (intf->condition == USB_INTERFACE_UNBOUND)
1642 status = -ENODEV; 1715 status = -ENODEV;
1643 else if (udev->autoresume_disabled)
1644 status = -EPERM;
1645 else { 1716 else {
1646 atomic_inc(&intf->pm_usage_cnt); 1717 atomic_inc(&intf->pm_usage_cnt);
1647 if (atomic_read(&intf->pm_usage_cnt) > 0 && 1718 if (atomic_read(&intf->pm_usage_cnt) > 0 &&
@@ -1654,28 +1725,6 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
1654} 1725}
1655EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async); 1726EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
1656 1727
1657/**
1658 * usb_autopm_set_interface - set a USB interface's autosuspend state
1659 * @intf: the usb_interface whose state should be set
1660 *
1661 * This routine sets the autosuspend state of @intf's device according
1662 * to @intf's usage counter, which the caller must have set previously.
1663 * If the counter is <= 0, the device is autosuspended (if it isn't
1664 * already suspended and if nothing else prevents the autosuspend). If
1665 * the counter is > 0, the device is autoresumed (if it isn't already
1666 * awake).
1667 */
1668int usb_autopm_set_interface(struct usb_interface *intf)
1669{
1670 int status;
1671
1672 status = usb_autopm_do_interface(intf, 0);
1673 dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
1674 __func__, status, atomic_read(&intf->pm_usage_cnt));
1675 return status;
1676}
1677EXPORT_SYMBOL_GPL(usb_autopm_set_interface);
1678
1679#else 1728#else
1680 1729
1681void usb_autosuspend_work(struct work_struct *work) 1730void usb_autosuspend_work(struct work_struct *work)
@@ -1707,7 +1756,6 @@ int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
1707 1756
1708 do_unbind_rebind(udev, DO_UNBIND); 1757 do_unbind_rebind(udev, DO_UNBIND);
1709 usb_pm_lock(udev); 1758 usb_pm_lock(udev);
1710 udev->auto_pm = 0;
1711 status = usb_suspend_both(udev, msg); 1759 status = usb_suspend_both(udev, msg);
1712 usb_pm_unlock(udev); 1760 usb_pm_unlock(udev);
1713 return status; 1761 return status;
@@ -1730,7 +1778,6 @@ int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
1730 int status; 1778 int status;
1731 1779
1732 usb_pm_lock(udev); 1780 usb_pm_lock(udev);
1733 udev->auto_pm = 0;
1734 status = usb_resume_both(udev, msg); 1781 status = usb_resume_both(udev, msg);
1735 udev->last_busy = jiffies; 1782 udev->last_busy = jiffies;
1736 usb_pm_unlock(udev); 1783 usb_pm_unlock(udev);
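
The remove_id attribute added above is the counterpart of new_id and parses the same "idVendor idProduct" hex pair. A user-space sketch; the driver path and device ID below are assumptions for illustration only:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* Illustrative path/ID: use the driver and the dynamic ID
		 * that were actually added through its new_id file. */
		int fd = open("/sys/bus/usb/drivers/usb-storage/remove_id",
			      O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "0557 2008", 9) < 0)
			perror("write");
		close(fd);
		return 0;
	}
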
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 222ee07ea680..bfc6c2eea647 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -99,6 +99,7 @@ static int init_usb_class(void)
99 printk(KERN_ERR "class_create failed for usb devices\n"); 99 printk(KERN_ERR "class_create failed for usb devices\n");
100 kfree(usb_class); 100 kfree(usb_class);
101 usb_class = NULL; 101 usb_class = NULL;
102 goto exit;
102 } 103 }
103 usb_class->class->devnode = usb_devnode; 104 usb_class->class->devnode = usb_devnode;
104 105
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 05e6d313961e..bdf87a8414a1 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -139,7 +139,7 @@ int usb_choose_configuration(struct usb_device *udev)
139 139
140 if (best) { 140 if (best) {
141 i = best->desc.bConfigurationValue; 141 i = best->desc.bConfigurationValue;
142 dev_info(&udev->dev, 142 dev_dbg(&udev->dev,
143 "configuration #%d chosen from %d choice%s\n", 143 "configuration #%d chosen from %d choice%s\n",
144 i, num_configs, plural(num_configs)); 144 i, num_configs, plural(num_configs));
145 } else { 145 } else {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 34de475f016e..6dac3b802d41 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -38,6 +38,7 @@
38#include <asm/unaligned.h> 38#include <asm/unaligned.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/workqueue.h> 40#include <linux/workqueue.h>
41#include <linux/mutex.h>
41 42
42#include <linux/usb.h> 43#include <linux/usb.h>
43 44
@@ -1275,13 +1276,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1275 1276
1276 if (usb_endpoint_xfer_control(&urb->ep->desc) 1277 if (usb_endpoint_xfer_control(&urb->ep->desc)
1277 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) { 1278 && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) {
1278 if (hcd->self.uses_dma) 1279 if (hcd->self.uses_dma) {
1279 urb->setup_dma = dma_map_single( 1280 urb->setup_dma = dma_map_single(
1280 hcd->self.controller, 1281 hcd->self.controller,
1281 urb->setup_packet, 1282 urb->setup_packet,
1282 sizeof(struct usb_ctrlrequest), 1283 sizeof(struct usb_ctrlrequest),
1283 DMA_TO_DEVICE); 1284 DMA_TO_DEVICE);
1284 else if (hcd->driver->flags & HCD_LOCAL_MEM) 1285 if (dma_mapping_error(hcd->self.controller,
1286 urb->setup_dma))
1287 return -EAGAIN;
1288 } else if (hcd->driver->flags & HCD_LOCAL_MEM)
1285 ret = hcd_alloc_coherent( 1289 ret = hcd_alloc_coherent(
1286 urb->dev->bus, mem_flags, 1290 urb->dev->bus, mem_flags,
1287 &urb->setup_dma, 1291 &urb->setup_dma,
@@ -1293,13 +1297,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
1293 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1297 dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1294 if (ret == 0 && urb->transfer_buffer_length != 0 1298 if (ret == 0 && urb->transfer_buffer_length != 0
1295 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { 1299 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
1296 if (hcd->self.uses_dma) 1300 if (hcd->self.uses_dma) {
1297 urb->transfer_dma = dma_map_single ( 1301 urb->transfer_dma = dma_map_single (
1298 hcd->self.controller, 1302 hcd->self.controller,
1299 urb->transfer_buffer, 1303 urb->transfer_buffer,
1300 urb->transfer_buffer_length, 1304 urb->transfer_buffer_length,
1301 dir); 1305 dir);
1302 else if (hcd->driver->flags & HCD_LOCAL_MEM) { 1306 if (dma_mapping_error(hcd->self.controller,
1307 urb->transfer_dma))
1308 return -EAGAIN;
1309 } else if (hcd->driver->flags & HCD_LOCAL_MEM) {
1303 ret = hcd_alloc_coherent( 1310 ret = hcd_alloc_coherent(
1304 urb->dev->bus, mem_flags, 1311 urb->dev->bus, mem_flags,
1305 &urb->transfer_dma, 1312 &urb->transfer_dma,
@@ -1589,19 +1596,32 @@ rescan:
1589 } 1596 }
1590} 1597}
1591 1598
1592/* Check whether a new configuration or alt setting for an interface 1599/**
1593 * will exceed the bandwidth for the bus (or the host controller resources). 1600 * usb_hcd_alloc_bandwidth - check whether a new bandwidth setting exceeds the bus bandwidth.
1594 * Only pass in a non-NULL config or interface, not both! 1601 * @new_config: new configuration to install
1595 * Passing NULL for both new_config and new_intf means the device will be 1602 * @cur_alt: the current alternate interface setting
1596 * de-configured by issuing a set configuration 0 command. 1603 * @new_alt: alternate interface setting that is being installed
1604 *
1605 * To change configurations, pass in the new configuration in new_config,
1606 * and pass NULL for cur_alt and new_alt.
1607 *
1608 * To reset a device's configuration (put the device in the ADDRESSED state),
1609 * pass in NULL for new_config, cur_alt, and new_alt.
1610 *
1611 * To change alternate interface settings, pass in NULL for new_config,
1612 * pass in the current alternate interface setting in cur_alt,
1613 * and pass in the new alternate interface setting in new_alt.
1614 *
1615 * Returns an error if the requested bandwidth change exceeds the
1616 * bus bandwidth or host controller internal resources.
1597 */ 1617 */
1598int usb_hcd_check_bandwidth(struct usb_device *udev, 1618int usb_hcd_alloc_bandwidth(struct usb_device *udev,
1599 struct usb_host_config *new_config, 1619 struct usb_host_config *new_config,
1600 struct usb_interface *new_intf) 1620 struct usb_host_interface *cur_alt,
1621 struct usb_host_interface *new_alt)
1601{ 1622{
1602 int num_intfs, i, j; 1623 int num_intfs, i, j;
1603 struct usb_interface_cache *intf_cache; 1624 struct usb_host_interface *alt = NULL;
1604 struct usb_host_interface *alt = 0;
1605 int ret = 0; 1625 int ret = 0;
1606 struct usb_hcd *hcd; 1626 struct usb_hcd *hcd;
1607 struct usb_host_endpoint *ep; 1627 struct usb_host_endpoint *ep;
@@ -1611,7 +1631,7 @@ int usb_hcd_check_bandwidth(struct usb_device *udev,
1611 return 0; 1631 return 0;
1612 1632
1613 /* Configuration is being removed - set configuration 0 */ 1633 /* Configuration is being removed - set configuration 0 */
1614 if (!new_config && !new_intf) { 1634 if (!new_config && !cur_alt) {
1615 for (i = 1; i < 16; ++i) { 1635 for (i = 1; i < 16; ++i) {
1616 ep = udev->ep_out[i]; 1636 ep = udev->ep_out[i];
1617 if (ep) 1637 if (ep)
@@ -1648,19 +1668,12 @@ int usb_hcd_check_bandwidth(struct usb_device *udev,
1648 } 1668 }
1649 } 1669 }
1650 for (i = 0; i < num_intfs; ++i) { 1670 for (i = 0; i < num_intfs; ++i) {
1671 /* Set up endpoints for alternate interface setting 0 */
1672 alt = usb_find_alt_setting(new_config, i, 0);
1673 if (!alt)
1674 /* No alt setting 0? Pick the first setting. */
1675 alt = &new_config->intf_cache[i]->altsetting[0];
1651 1676
1652 /* Dig the endpoints for alt setting 0 out of the
1653 * interface cache for this interface
1654 */
1655 intf_cache = new_config->intf_cache[i];
1656 for (j = 0; j < intf_cache->num_altsetting; j++) {
1657 if (intf_cache->altsetting[j].desc.bAlternateSetting == 0)
1658 alt = &intf_cache->altsetting[j];
1659 }
1660 if (!alt) {
1661 printk(KERN_DEBUG "Did not find alt setting 0 for intf %d\n", i);
1662 continue;
1663 }
1664 for (j = 0; j < alt->desc.bNumEndpoints; j++) { 1677 for (j = 0; j < alt->desc.bNumEndpoints; j++) {
1665 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]); 1678 ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
1666 if (ret < 0) 1679 if (ret < 0)
@@ -1668,6 +1681,22 @@ int usb_hcd_check_bandwidth(struct usb_device *udev,
1668 } 1681 }
1669 } 1682 }
1670 } 1683 }
1684 if (cur_alt && new_alt) {
1685 /* Drop all the endpoints in the current alt setting */
1686 for (i = 0; i < cur_alt->desc.bNumEndpoints; i++) {
1687 ret = hcd->driver->drop_endpoint(hcd, udev,
1688 &cur_alt->endpoint[i]);
1689 if (ret < 0)
1690 goto reset;
1691 }
1692 /* Add all the endpoints in the new alt setting */
1693 for (i = 0; i < new_alt->desc.bNumEndpoints; i++) {
1694 ret = hcd->driver->add_endpoint(hcd, udev,
1695 &new_alt->endpoint[i]);
1696 if (ret < 0)
1697 goto reset;
1698 }
1699 }
1671 ret = hcd->driver->check_bandwidth(hcd, udev); 1700 ret = hcd->driver->check_bandwidth(hcd, udev);
1672reset: 1701reset:
1673 if (ret < 0) 1702 if (ret < 0)
@@ -1984,6 +2013,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
1984#ifdef CONFIG_PM 2013#ifdef CONFIG_PM
1985 INIT_WORK(&hcd->wakeup_work, hcd_resume_work); 2014 INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
1986#endif 2015#endif
2016 mutex_init(&hcd->bandwidth_mutex);
1987 2017
1988 hcd->driver = driver; 2018 hcd->driver = driver;
1989 hcd->product_desc = (driver->product_desc) ? driver->product_desc : 2019 hcd->product_desc = (driver->product_desc) ? driver->product_desc :
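
usb_hcd_alloc_bandwidth() and bandwidth_mutex are usbcore-internal (declared in drivers/usb/core/hcd.h), so the following is a sketch of core code rather than a driver-facing API; it shows the "change alternate setting" calling convention under the new mutex, with an illustrative function name:

	/* Sketch: reserve bandwidth for new_alt while releasing cur_alt.
	 * A NULL config plus two alt settings selects the alt-setting
	 * convention described in the kernel-doc above. */
	static int example_switch_alt(struct usb_device *udev,
			struct usb_host_interface *cur_alt,
			struct usb_host_interface *new_alt)
	{
		struct usb_hcd *hcd = bus_to_hcd(udev->bus);
		int ret;

		mutex_lock(&hcd->bandwidth_mutex);
		ret = usb_hcd_alloc_bandwidth(udev, NULL, cur_alt, new_alt);
		mutex_unlock(&hcd->bandwidth_mutex);
		return ret;
	}
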
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index 79782a1c43f6..d8b43aee581e 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -111,6 +111,20 @@ struct usb_hcd {
111 u64 rsrc_len; /* memory/io resource length */ 111 u64 rsrc_len; /* memory/io resource length */
112 unsigned power_budget; /* in mA, 0 = no limit */ 112 unsigned power_budget; /* in mA, 0 = no limit */
113 113
114 /* bandwidth_mutex should be taken before adding or removing
115 * any new bus bandwidth constraints:
116 * 1. Before adding a configuration for a new device.
117 * 2. Before removing the configuration to put the device into
118 * the addressed state.
119 * 3. Before selecting a different configuration.
120 * 4. Before selecting an alternate interface setting.
121 *
122 * bandwidth_mutex should be dropped after a successful control message
 123 * to the device, or after resetting the bandwidth following a failed attempt.
124 */
125 struct mutex bandwidth_mutex;
126
127
114#define HCD_BUFFER_POOLS 4 128#define HCD_BUFFER_POOLS 4
115 struct dma_pool *pool [HCD_BUFFER_POOLS]; 129 struct dma_pool *pool [HCD_BUFFER_POOLS];
116 130
@@ -290,9 +304,10 @@ extern void usb_hcd_disable_endpoint(struct usb_device *udev,
290extern void usb_hcd_reset_endpoint(struct usb_device *udev, 304extern void usb_hcd_reset_endpoint(struct usb_device *udev,
291 struct usb_host_endpoint *ep); 305 struct usb_host_endpoint *ep);
292extern void usb_hcd_synchronize_unlinks(struct usb_device *udev); 306extern void usb_hcd_synchronize_unlinks(struct usb_device *udev);
293extern int usb_hcd_check_bandwidth(struct usb_device *udev, 307extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
294 struct usb_host_config *new_config, 308 struct usb_host_config *new_config,
295 struct usb_interface *new_intf); 309 struct usb_host_interface *old_alt,
310 struct usb_host_interface *new_alt);
296extern int usb_hcd_get_frame_number(struct usb_device *udev); 311extern int usb_hcd_get_frame_number(struct usb_device *udev);
297 312
298extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver, 313extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 0f857e645058..06af970e1064 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -45,7 +45,6 @@ struct usb_hub {
45 45
46 /* buffer for urb ... with extra space in case of babble */ 46 /* buffer for urb ... with extra space in case of babble */
47 char (*buffer)[8]; 47 char (*buffer)[8];
48 dma_addr_t buffer_dma; /* DMA address for buffer */
49 union { 48 union {
50 struct usb_hub_status hub; 49 struct usb_hub_status hub;
51 struct usb_port_status port; 50 struct usb_port_status port;
@@ -61,6 +60,8 @@ struct usb_hub {
61 status change */ 60 status change */
62 unsigned long busy_bits[1]; /* ports being reset or 61 unsigned long busy_bits[1]; /* ports being reset or
63 resumed */ 62 resumed */
63 unsigned long removed_bits[1]; /* ports with a "removed"
64 device present */
64#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */ 65#if USB_MAXCHILDREN > 31 /* 8*sizeof(unsigned long) - 1 */
65#error event_bits[] is too short! 66#error event_bits[] is too short!
66#endif 67#endif
@@ -70,6 +71,7 @@ struct usb_hub {
70 71
71 unsigned mA_per_port; /* current for each child */ 72 unsigned mA_per_port; /* current for each child */
72 73
74 unsigned init_done:1;
73 unsigned limited_power:1; 75 unsigned limited_power:1;
74 unsigned quiescing:1; 76 unsigned quiescing:1;
75 unsigned disconnected:1; 77 unsigned disconnected:1;
@@ -374,12 +376,13 @@ static void kick_khubd(struct usb_hub *hub)
374{ 376{
375 unsigned long flags; 377 unsigned long flags;
376 378
377 /* Suppress autosuspend until khubd runs */
378 atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1);
379
380 spin_lock_irqsave(&hub_event_lock, flags); 379 spin_lock_irqsave(&hub_event_lock, flags);
381 if (!hub->disconnected && list_empty(&hub->event_list)) { 380 if (!hub->disconnected && list_empty(&hub->event_list)) {
382 list_add_tail(&hub->event_list, &hub_event_list); 381 list_add_tail(&hub->event_list, &hub_event_list);
382
383 /* Suppress autosuspend until khubd runs */
384 usb_autopm_get_interface_no_resume(
385 to_usb_interface(hub->intfdev));
383 wake_up(&khubd_wait); 386 wake_up(&khubd_wait);
384 } 387 }
385 spin_unlock_irqrestore(&hub_event_lock, flags); 388 spin_unlock_irqrestore(&hub_event_lock, flags);
@@ -636,8 +639,35 @@ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
636 kick_khubd(hub); 639 kick_khubd(hub);
637} 640}
638 641
642/**
643 * usb_remove_device - disable a device's port on its parent hub
644 * @udev: device to be disabled and removed
645 * Context: @udev locked, must be able to sleep.
646 *
647 * After @udev's port has been disabled, khubd is notified and it will
648 * see that the device has been disconnected. When the device is
649 * physically unplugged and something is plugged in, the events will
650 * be received and processed normally.
651 */
652int usb_remove_device(struct usb_device *udev)
653{
654 struct usb_hub *hub;
655 struct usb_interface *intf;
656
657 if (!udev->parent) /* Can't remove a root hub */
658 return -EINVAL;
659 hub = hdev_to_hub(udev->parent);
660 intf = to_usb_interface(hub->intfdev);
661
662 usb_autopm_get_interface(intf);
663 set_bit(udev->portnum, hub->removed_bits);
664 hub_port_logical_disconnect(hub, udev->portnum);
665 usb_autopm_put_interface(intf);
666 return 0;
667}
668
639enum hub_activation_type { 669enum hub_activation_type {
640 HUB_INIT, HUB_INIT2, HUB_INIT3, 670 HUB_INIT, HUB_INIT2, HUB_INIT3, /* INITs must come first */
641 HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME, 671 HUB_POST_RESET, HUB_RESUME, HUB_RESET_RESUME,
642}; 672};
643 673
@@ -682,8 +712,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
682 msecs_to_jiffies(delay)); 712 msecs_to_jiffies(delay));
683 713
684 /* Suppress autosuspend until init is done */ 714 /* Suppress autosuspend until init is done */
685 atomic_set(&to_usb_interface(hub->intfdev)-> 715 usb_autopm_get_interface_no_resume(
686 pm_usage_cnt, 1); 716 to_usb_interface(hub->intfdev));
687 return; /* Continues at init2: below */ 717 return; /* Continues at init2: below */
688 } else { 718 } else {
689 hub_power_on(hub, true); 719 hub_power_on(hub, true);
@@ -731,6 +761,13 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
731 USB_PORT_FEAT_C_ENABLE); 761 USB_PORT_FEAT_C_ENABLE);
732 } 762 }
733 763
764 /* We can forget about a "removed" device when there's a
765 * physical disconnect or the connect status changes.
766 */
767 if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
768 (portchange & USB_PORT_STAT_C_CONNECTION))
769 clear_bit(port1, hub->removed_bits);
770
734 if (!udev || udev->state == USB_STATE_NOTATTACHED) { 771 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
735 /* Tell khubd to disconnect the device or 772 /* Tell khubd to disconnect the device or
736 * check for a new connection 773 * check for a new connection
@@ -783,6 +820,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
783 } 820 }
784 init3: 821 init3:
785 hub->quiescing = 0; 822 hub->quiescing = 0;
823 hub->init_done = 1;
786 824
787 status = usb_submit_urb(hub->urb, GFP_NOIO); 825 status = usb_submit_urb(hub->urb, GFP_NOIO);
788 if (status < 0) 826 if (status < 0)
@@ -792,6 +830,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
792 830
793 /* Scan all ports that need attention */ 831 /* Scan all ports that need attention */
794 kick_khubd(hub); 832 kick_khubd(hub);
833
834 /* Allow autosuspend if it was suppressed */
835 if (type <= HUB_INIT3)
836 usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
795} 837}
796 838
797/* Implement the continuations for the delays above */ 839/* Implement the continuations for the delays above */
@@ -819,6 +861,11 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
819 int i; 861 int i;
820 862
821 cancel_delayed_work_sync(&hub->init_work); 863 cancel_delayed_work_sync(&hub->init_work);
864 if (!hub->init_done) {
865 hub->init_done = 1;
866 usb_autopm_put_interface_no_suspend(
867 to_usb_interface(hub->intfdev));
868 }
822 869
823 /* khubd and related activity won't re-trigger */ 870 /* khubd and related activity won't re-trigger */
824 hub->quiescing = 1; 871 hub->quiescing = 1;
@@ -869,8 +916,7 @@ static int hub_configure(struct usb_hub *hub,
869 int maxp, ret; 916 int maxp, ret;
870 char *message = "out of memory"; 917 char *message = "out of memory";
871 918
872 hub->buffer = usb_buffer_alloc(hdev, sizeof(*hub->buffer), GFP_KERNEL, 919 hub->buffer = kmalloc(sizeof(*hub->buffer), GFP_KERNEL);
873 &hub->buffer_dma);
874 if (!hub->buffer) { 920 if (!hub->buffer) {
875 ret = -ENOMEM; 921 ret = -ENOMEM;
876 goto fail; 922 goto fail;
@@ -1111,8 +1157,6 @@ static int hub_configure(struct usb_hub *hub,
1111 1157
1112 usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq, 1158 usb_fill_int_urb(hub->urb, hdev, pipe, *hub->buffer, maxp, hub_irq,
1113 hub, endpoint->bInterval); 1159 hub, endpoint->bInterval);
1114 hub->urb->transfer_dma = hub->buffer_dma;
1115 hub->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
1116 1160
1117 /* maybe cycle the hub leds */ 1161 /* maybe cycle the hub leds */
1118 if (hub->has_indicators && blinkenlights) 1162 if (hub->has_indicators && blinkenlights)
@@ -1144,7 +1188,10 @@ static void hub_disconnect(struct usb_interface *intf)
1144 1188
1145 /* Take the hub off the event list and don't let it be added again */ 1189 /* Take the hub off the event list and don't let it be added again */
1146 spin_lock_irq(&hub_event_lock); 1190 spin_lock_irq(&hub_event_lock);
1147 list_del_init(&hub->event_list); 1191 if (!list_empty(&hub->event_list)) {
1192 list_del_init(&hub->event_list);
1193 usb_autopm_put_interface_no_suspend(intf);
1194 }
1148 hub->disconnected = 1; 1195 hub->disconnected = 1;
1149 spin_unlock_irq(&hub_event_lock); 1196 spin_unlock_irq(&hub_event_lock);
1150 1197
@@ -1162,8 +1209,7 @@ static void hub_disconnect(struct usb_interface *intf)
1162 kfree(hub->port_owners); 1209 kfree(hub->port_owners);
1163 kfree(hub->descriptor); 1210 kfree(hub->descriptor);
1164 kfree(hub->status); 1211 kfree(hub->status);
1165 usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer, 1212 kfree(hub->buffer);
1166 hub->buffer_dma);
1167 1213
1168 kref_put(&hub->kref, hub_release); 1214 kref_put(&hub->kref, hub_release);
1169} 1215}
@@ -1630,7 +1676,7 @@ static int usb_configure_device_otg(struct usb_device *udev)
1630 if (!udev->bus->is_b_host 1676 if (!udev->bus->is_b_host
1631 && udev->config 1677 && udev->config
1632 && udev->parent == udev->bus->root_hub) { 1678 && udev->parent == udev->bus->root_hub) {
1633 struct usb_otg_descriptor *desc = 0; 1679 struct usb_otg_descriptor *desc = NULL;
1634 struct usb_bus *bus = udev->bus; 1680 struct usb_bus *bus = udev->bus;
1635 1681
1636 /* descriptor may appear anywhere in config */ 1682 /* descriptor may appear anywhere in config */
@@ -2123,9 +2169,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2123 USB_DEVICE_REMOTE_WAKEUP, 0, 2169 USB_DEVICE_REMOTE_WAKEUP, 0,
2124 NULL, 0, 2170 NULL, 0,
2125 USB_CTRL_SET_TIMEOUT); 2171 USB_CTRL_SET_TIMEOUT);
2126 if (status) 2172 if (status) {
2127 dev_dbg(&udev->dev, "won't remote wakeup, status %d\n", 2173 dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
2128 status); 2174 status);
2175 /* bail if autosuspend is requested */
2176 if (msg.event & PM_EVENT_AUTO)
2177 return status;
2178 }
2129 } 2179 }
2130 2180
2131 /* see 7.1.7.6 */ 2181 /* see 7.1.7.6 */
@@ -2134,7 +2184,8 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2134 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n", 2184 dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
2135 port1, status); 2185 port1, status);
2136 /* paranoia: "should not happen" */ 2186 /* paranoia: "should not happen" */
2137 (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 2187 if (udev->do_remote_wakeup)
2188 (void) usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
2138 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE, 2189 USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
2139 USB_DEVICE_REMOTE_WAKEUP, 0, 2190 USB_DEVICE_REMOTE_WAKEUP, 0,
2140 NULL, 0, 2191 NULL, 0,
@@ -2965,6 +3016,13 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2965 usb_disconnect(&hdev->children[port1-1]); 3016 usb_disconnect(&hdev->children[port1-1]);
2966 clear_bit(port1, hub->change_bits); 3017 clear_bit(port1, hub->change_bits);
2967 3018
3019 /* We can forget about a "removed" device when there's a physical
3020 * disconnect or the connect status changes.
3021 */
3022 if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
3023 (portchange & USB_PORT_STAT_C_CONNECTION))
3024 clear_bit(port1, hub->removed_bits);
3025
2968 if (portchange & (USB_PORT_STAT_C_CONNECTION | 3026 if (portchange & (USB_PORT_STAT_C_CONNECTION |
2969 USB_PORT_STAT_C_ENABLE)) { 3027 USB_PORT_STAT_C_ENABLE)) {
2970 status = hub_port_debounce(hub, port1); 3028 status = hub_port_debounce(hub, port1);
@@ -2978,8 +3036,11 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
2978 } 3036 }
2979 } 3037 }
2980 3038
2981 /* Return now if debouncing failed or nothing is connected */ 3039 /* Return now if debouncing failed or nothing is connected or
2982 if (!(portstatus & USB_PORT_STAT_CONNECTION)) { 3040 * the device was "removed".
3041 */
3042 if (!(portstatus & USB_PORT_STAT_CONNECTION) ||
3043 test_bit(port1, hub->removed_bits)) {
2983 3044
2984 /* maybe switch power back on (e.g. root hub was reset) */ 3045 /* maybe switch power back on (e.g. root hub was reset) */
2985 if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2 3046 if ((wHubCharacteristics & HUB_CHAR_LPSM) < 2
@@ -3189,7 +3250,7 @@ static void hub_events(void)
3189 * disconnected while waiting for the lock to succeed. */ 3250 * disconnected while waiting for the lock to succeed. */
3190 usb_lock_device(hdev); 3251 usb_lock_device(hdev);
3191 if (unlikely(hub->disconnected)) 3252 if (unlikely(hub->disconnected))
3192 goto loop; 3253 goto loop2;
3193 3254
3194 /* If the hub has died, clean up after it */ 3255 /* If the hub has died, clean up after it */
3195 if (hdev->state == USB_STATE_NOTATTACHED) { 3256 if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3338,11 +3399,15 @@ static void hub_events(void)
3338 } 3399 }
3339 } 3400 }
3340 3401
3341loop_autopm: 3402 loop_autopm:
3342 /* Allow autosuspend if we're not going to run again */ 3403 /* Balance the usb_autopm_get_interface() above */
3343 if (list_empty(&hub->event_list)) 3404 usb_autopm_put_interface_no_suspend(intf);
3344 usb_autopm_enable(intf); 3405 loop:
3345loop: 3406 /* Balance the usb_autopm_get_interface_no_resume() in
3407 * kick_khubd() and allow autosuspend.
3408 */
3409 usb_autopm_put_interface(intf);
3410 loop2:
3346 usb_unlock_device(hdev); 3411 usb_unlock_device(hdev);
3347 kref_put(&hub->kref, hub_release); 3412 kref_put(&hub->kref, hub_release);
3348 3413
@@ -3534,6 +3599,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3534{ 3599{
3535 struct usb_device *parent_hdev = udev->parent; 3600 struct usb_device *parent_hdev = udev->parent;
3536 struct usb_hub *parent_hub; 3601 struct usb_hub *parent_hub;
3602 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
3537 struct usb_device_descriptor descriptor = udev->descriptor; 3603 struct usb_device_descriptor descriptor = udev->descriptor;
3538 int i, ret = 0; 3604 int i, ret = 0;
3539 int port1 = udev->portnum; 3605 int port1 = udev->portnum;
@@ -3577,6 +3643,16 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3577 /* Restore the device's previous configuration */ 3643 /* Restore the device's previous configuration */
3578 if (!udev->actconfig) 3644 if (!udev->actconfig)
3579 goto done; 3645 goto done;
3646
3647 mutex_lock(&hcd->bandwidth_mutex);
3648 ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
3649 if (ret < 0) {
3650 dev_warn(&udev->dev,
3651 "Busted HC? Not enough HCD resources for "
3652 "old configuration.\n");
3653 mutex_unlock(&hcd->bandwidth_mutex);
3654 goto re_enumerate;
3655 }
3580 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 3656 ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
3581 USB_REQ_SET_CONFIGURATION, 0, 3657 USB_REQ_SET_CONFIGURATION, 0,
3582 udev->actconfig->desc.bConfigurationValue, 0, 3658 udev->actconfig->desc.bConfigurationValue, 0,
@@ -3585,8 +3661,10 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3585 dev_err(&udev->dev, 3661 dev_err(&udev->dev,
3586 "can't restore configuration #%d (error=%d)\n", 3662 "can't restore configuration #%d (error=%d)\n",
3587 udev->actconfig->desc.bConfigurationValue, ret); 3663 udev->actconfig->desc.bConfigurationValue, ret);
3664 mutex_unlock(&hcd->bandwidth_mutex);
3588 goto re_enumerate; 3665 goto re_enumerate;
3589 } 3666 }
3667 mutex_unlock(&hcd->bandwidth_mutex);
3590 usb_set_device_state(udev, USB_STATE_CONFIGURED); 3668 usb_set_device_state(udev, USB_STATE_CONFIGURED);
3591 3669
3592 /* Put interfaces back into the same altsettings as before. 3670 /* Put interfaces back into the same altsettings as before.
@@ -3596,7 +3674,8 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3596 * endpoint state. 3674 * endpoint state.
3597 */ 3675 */
3598 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) { 3676 for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
3599 struct usb_interface *intf = udev->actconfig->interface[i]; 3677 struct usb_host_config *config = udev->actconfig;
3678 struct usb_interface *intf = config->interface[i];
3600 struct usb_interface_descriptor *desc; 3679 struct usb_interface_descriptor *desc;
3601 3680
3602 desc = &intf->cur_altsetting->desc; 3681 desc = &intf->cur_altsetting->desc;
@@ -3605,6 +3684,17 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
3605 usb_enable_interface(udev, intf, true); 3684 usb_enable_interface(udev, intf, true);
3606 ret = 0; 3685 ret = 0;
3607 } else { 3686 } else {
3687 /* We've just reset the device, so it will think alt
3688 * setting 0 is installed. For usb_set_interface() to
3689 * work properly, we need to set the current alternate
3690 * interface setting to 0 (or the first alt setting, if
3691 * the device doesn't have alt setting 0).
3692 */
3693 intf->cur_altsetting =
3694 usb_find_alt_setting(config, i, 0);
3695 if (!intf->cur_altsetting)
3696 intf->cur_altsetting =
3697 &config->intf_cache[i]->altsetting[0];
3608 ret = usb_set_interface(udev, desc->bInterfaceNumber, 3698 ret = usb_set_interface(udev, desc->bInterfaceNumber,
3609 desc->bAlternateSetting); 3699 desc->bAlternateSetting);
3610 } 3700 }
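
The hub changes replace direct pm_usage_cnt manipulation with paired autopm calls; the invariant is that every usb_autopm_get_interface*() is balanced exactly once on every exit path. A compressed sketch of the pairing that kick_khubd() and hub_events() now implement; the function is illustrative:

	/* Sketch: hold off autosuspend while a queued event is pending,
	 * without waking a suspended hub, then release the count once
	 * the event has been handled. */
	static void example_handle_event(struct usb_interface *intf)
	{
		usb_autopm_get_interface_no_resume(intf);

		/* ... process the port event ... */

		usb_autopm_put_interface(intf);
	}
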
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e80f1af438c8..1b994846e8e0 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -393,13 +393,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
393 if (io->entries <= 0) 393 if (io->entries <= 0)
394 return io->entries; 394 return io->entries;
395 395
396 /* If we're running on an xHCI host controller, queue the whole scatter 396 if (dev->bus->sg_tablesize > 0) {
397 * gather list with one call to urb_enqueue(). This is only for bulk,
398 * as that endpoint type does not care how the data gets broken up
399 * across frames.
400 */
401 if (usb_pipebulk(pipe) &&
402 bus_to_hcd(dev->bus)->driver->flags & HCD_USB3) {
403 io->urbs = kmalloc(sizeof *io->urbs, mem_flags); 397 io->urbs = kmalloc(sizeof *io->urbs, mem_flags);
404 use_sg = true; 398 use_sg = true;
405 } else { 399 } else {
@@ -409,7 +403,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
409 if (!io->urbs) 403 if (!io->urbs)
410 goto nomem; 404 goto nomem;
411 405
412 urb_flags = URB_NO_INTERRUPT; 406 urb_flags = 0;
413 if (dma) 407 if (dma)
414 urb_flags |= URB_NO_TRANSFER_DMA_MAP; 408 urb_flags |= URB_NO_TRANSFER_DMA_MAP;
415 if (usb_pipein(pipe)) 409 if (usb_pipein(pipe))
@@ -441,6 +435,7 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
441 io->urbs[0]->num_sgs = io->entries; 435 io->urbs[0]->num_sgs = io->entries;
442 io->entries = 1; 436 io->entries = 1;
443 } else { 437 } else {
438 urb_flags |= URB_NO_INTERRUPT;
444 for_each_sg(sg, sg, io->entries, i) { 439 for_each_sg(sg, sg, io->entries, i) {
445 unsigned len; 440 unsigned len;
446 441
@@ -1303,6 +1298,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1303{ 1298{
1304 struct usb_interface *iface; 1299 struct usb_interface *iface;
1305 struct usb_host_interface *alt; 1300 struct usb_host_interface *alt;
1301 struct usb_hcd *hcd = bus_to_hcd(dev->bus);
1306 int ret; 1302 int ret;
1307 int manual = 0; 1303 int manual = 0;
1308 unsigned int epaddr; 1304 unsigned int epaddr;
@@ -1325,6 +1321,18 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1325 return -EINVAL; 1321 return -EINVAL;
1326 } 1322 }
1327 1323
1324 /* Make sure we have enough bandwidth for this alternate interface.
1325 * Remove the current alt setting and add the new alt setting.
1326 */
1327 mutex_lock(&hcd->bandwidth_mutex);
1328 ret = usb_hcd_alloc_bandwidth(dev, NULL, iface->cur_altsetting, alt);
1329 if (ret < 0) {
1330 dev_info(&dev->dev, "Not enough bandwidth for altsetting %d\n",
1331 alternate);
1332 mutex_unlock(&hcd->bandwidth_mutex);
1333 return ret;
1334 }
1335
1328 if (dev->quirks & USB_QUIRK_NO_SET_INTF) 1336 if (dev->quirks & USB_QUIRK_NO_SET_INTF)
1329 ret = -EPIPE; 1337 ret = -EPIPE;
1330 else 1338 else
@@ -1340,8 +1348,13 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
1340 "manual set_interface for iface %d, alt %d\n", 1348 "manual set_interface for iface %d, alt %d\n",
1341 interface, alternate); 1349 interface, alternate);
1342 manual = 1; 1350 manual = 1;
1343 } else if (ret < 0) 1351 } else if (ret < 0) {
1352 /* Re-instate the old alt setting */
1353 usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting);
1354 mutex_unlock(&hcd->bandwidth_mutex);
1344 return ret; 1355 return ret;
1356 }
1357 mutex_unlock(&hcd->bandwidth_mutex);
1345 1358
1346 /* FIXME drivers shouldn't need to replicate/bugfix the logic here 1359 /* FIXME drivers shouldn't need to replicate/bugfix the logic here
1347 * when they implement async or easily-killable versions of this or 1360 * when they implement async or easily-killable versions of this or
@@ -1423,6 +1436,7 @@ int usb_reset_configuration(struct usb_device *dev)
1423{ 1436{
1424 int i, retval; 1437 int i, retval;
1425 struct usb_host_config *config; 1438 struct usb_host_config *config;
1439 struct usb_hcd *hcd = bus_to_hcd(dev->bus);
1426 1440
1427 if (dev->state == USB_STATE_SUSPENDED) 1441 if (dev->state == USB_STATE_SUSPENDED)
1428 return -EHOSTUNREACH; 1442 return -EHOSTUNREACH;
@@ -1438,12 +1452,46 @@ int usb_reset_configuration(struct usb_device *dev)
1438 } 1452 }
1439 1453
1440 config = dev->actconfig; 1454 config = dev->actconfig;
1455 retval = 0;
1456 mutex_lock(&hcd->bandwidth_mutex);
1457 /* Make sure we have enough bandwidth for each alternate setting 0 */
1458 for (i = 0; i < config->desc.bNumInterfaces; i++) {
1459 struct usb_interface *intf = config->interface[i];
1460 struct usb_host_interface *alt;
1461
1462 alt = usb_altnum_to_altsetting(intf, 0);
1463 if (!alt)
1464 alt = &intf->altsetting[0];
1465 if (alt != intf->cur_altsetting)
1466 retval = usb_hcd_alloc_bandwidth(dev, NULL,
1467 intf->cur_altsetting, alt);
1468 if (retval < 0)
1469 break;
1470 }
1471 /* If not, reinstate the old alternate settings */
1472 if (retval < 0) {
1473reset_old_alts:
1474 for (; i >= 0; i--) {
1475 struct usb_interface *intf = config->interface[i];
1476 struct usb_host_interface *alt;
1477
1478 alt = usb_altnum_to_altsetting(intf, 0);
1479 if (!alt)
1480 alt = &intf->altsetting[0];
1481 if (alt != intf->cur_altsetting)
1482 usb_hcd_alloc_bandwidth(dev, NULL,
1483 alt, intf->cur_altsetting);
1484 }
1485 mutex_unlock(&hcd->bandwidth_mutex);
1486 return retval;
1487 }
1441 retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1488 retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
1442 USB_REQ_SET_CONFIGURATION, 0, 1489 USB_REQ_SET_CONFIGURATION, 0,
1443 config->desc.bConfigurationValue, 0, 1490 config->desc.bConfigurationValue, 0,
1444 NULL, 0, USB_CTRL_SET_TIMEOUT); 1491 NULL, 0, USB_CTRL_SET_TIMEOUT);
1445 if (retval < 0) 1492 if (retval < 0)
1446 return retval; 1493 goto reset_old_alts;
1494 mutex_unlock(&hcd->bandwidth_mutex);
1447 1495
1448 /* re-init hc/hcd interface/endpoint state */ 1496 /* re-init hc/hcd interface/endpoint state */
1449 for (i = 0; i < config->desc.bNumInterfaces; i++) { 1497 for (i = 0; i < config->desc.bNumInterfaces; i++) {
@@ -1585,7 +1633,7 @@ static struct usb_interface_assoc_descriptor *find_iad(struct usb_device *dev,
1585 * 1633 *
1586 * See usb_queue_reset_device() for more details 1634 * See usb_queue_reset_device() for more details
1587 */ 1635 */
1588void __usb_queue_reset_device(struct work_struct *ws) 1636static void __usb_queue_reset_device(struct work_struct *ws)
1589{ 1637{
1590 int rc; 1638 int rc;
1591 struct usb_interface *iface = 1639 struct usb_interface *iface =
@@ -1652,6 +1700,7 @@ int usb_set_configuration(struct usb_device *dev, int configuration)
1652 int i, ret; 1700 int i, ret;
1653 struct usb_host_config *cp = NULL; 1701 struct usb_host_config *cp = NULL;
1654 struct usb_interface **new_interfaces = NULL; 1702 struct usb_interface **new_interfaces = NULL;
1703 struct usb_hcd *hcd = bus_to_hcd(dev->bus);
1655 int n, nintf; 1704 int n, nintf;
1656 1705
1657 if (dev->authorized == 0 || configuration == -1) 1706 if (dev->authorized == 0 || configuration == -1)
@@ -1721,12 +1770,11 @@ free_interfaces:
1721 * host controller will not allow submissions to dropped endpoints. If 1770 * host controller will not allow submissions to dropped endpoints. If
1722 * this call fails, the device state is unchanged. 1771 * this call fails, the device state is unchanged.
1723 */ 1772 */
1724 if (cp) 1773 mutex_lock(&hcd->bandwidth_mutex);
1725 ret = usb_hcd_check_bandwidth(dev, cp, NULL); 1774 ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
1726 else
1727 ret = usb_hcd_check_bandwidth(dev, NULL, NULL);
1728 if (ret < 0) { 1775 if (ret < 0) {
1729 usb_autosuspend_device(dev); 1776 usb_autosuspend_device(dev);
1777 mutex_unlock(&hcd->bandwidth_mutex);
1730 goto free_interfaces; 1778 goto free_interfaces;
1731 } 1779 }
1732 1780
@@ -1752,10 +1800,12 @@ free_interfaces:
1752 dev->actconfig = cp; 1800 dev->actconfig = cp;
1753 if (!cp) { 1801 if (!cp) {
1754 usb_set_device_state(dev, USB_STATE_ADDRESS); 1802 usb_set_device_state(dev, USB_STATE_ADDRESS);
1755 usb_hcd_check_bandwidth(dev, NULL, NULL); 1803 usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
1756 usb_autosuspend_device(dev); 1804 usb_autosuspend_device(dev);
1805 mutex_unlock(&hcd->bandwidth_mutex);
1757 goto free_interfaces; 1806 goto free_interfaces;
1758 } 1807 }
1808 mutex_unlock(&hcd->bandwidth_mutex);
1759 usb_set_device_state(dev, USB_STATE_CONFIGURED); 1809 usb_set_device_state(dev, USB_STATE_CONFIGURED);
1760 1810
1761 /* Initialize the new interface structures and the 1811 /* Initialize the new interface structures and the
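
Since usb_set_interface() now reserves bandwidth under bandwidth_mutex before issuing SET_INTERFACE, a driver can see it fail purely for bandwidth reasons and may want a fallback. A hedged sketch, with the alt-setting numbers as assumptions:

	/* Sketch: prefer a high-bandwidth alt setting, fall back to the
	 * low/zero-bandwidth alt 0 if the HCD cannot reserve resources. */
	static int example_select_alt(struct usb_device *udev, int ifnum)
	{
		int ret = usb_set_interface(udev, ifnum, 1);

		if (ret < 0)
			ret = usb_set_interface(udev, ifnum, 0);
		return ret;
	}
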
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 7ec3041ae79e..15477008b631 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -139,6 +139,16 @@ show_devnum(struct device *dev, struct device_attribute *attr, char *buf)
139static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL); 139static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
140 140
141static ssize_t 141static ssize_t
142show_devpath(struct device *dev, struct device_attribute *attr, char *buf)
143{
144 struct usb_device *udev;
145
146 udev = to_usb_device(dev);
147 return sprintf(buf, "%s\n", udev->devpath);
148}
149static DEVICE_ATTR(devpath, S_IRUGO, show_devpath, NULL);
150
151static ssize_t
142show_version(struct device *dev, struct device_attribute *attr, char *buf) 152show_version(struct device *dev, struct device_attribute *attr, char *buf)
143{ 153{
144 struct usb_device *udev; 154 struct usb_device *udev;
@@ -317,7 +327,6 @@ static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR,
317 327
318static const char on_string[] = "on"; 328static const char on_string[] = "on";
319static const char auto_string[] = "auto"; 329static const char auto_string[] = "auto";
320static const char suspend_string[] = "suspend";
321 330
322static ssize_t 331static ssize_t
323show_level(struct device *dev, struct device_attribute *attr, char *buf) 332show_level(struct device *dev, struct device_attribute *attr, char *buf)
@@ -325,13 +334,8 @@ show_level(struct device *dev, struct device_attribute *attr, char *buf)
325 struct usb_device *udev = to_usb_device(dev); 334 struct usb_device *udev = to_usb_device(dev);
326 const char *p = auto_string; 335 const char *p = auto_string;
327 336
328 if (udev->state == USB_STATE_SUSPENDED) { 337 if (udev->state != USB_STATE_SUSPENDED && udev->autosuspend_disabled)
329 if (udev->autoresume_disabled) 338 p = on_string;
330 p = suspend_string;
331 } else {
332 if (udev->autosuspend_disabled)
333 p = on_string;
334 }
335 return sprintf(buf, "%s\n", p); 339 return sprintf(buf, "%s\n", p);
336} 340}
337 341
@@ -343,7 +347,7 @@ set_level(struct device *dev, struct device_attribute *attr,
343 int len = count; 347 int len = count;
344 char *cp; 348 char *cp;
345 int rc = 0; 349 int rc = 0;
346 int old_autosuspend_disabled, old_autoresume_disabled; 350 int old_autosuspend_disabled;
347 351
348 cp = memchr(buf, '\n', count); 352 cp = memchr(buf, '\n', count);
349 if (cp) 353 if (cp)
@@ -351,7 +355,6 @@ set_level(struct device *dev, struct device_attribute *attr,
351 355
352 usb_lock_device(udev); 356 usb_lock_device(udev);
353 old_autosuspend_disabled = udev->autosuspend_disabled; 357 old_autosuspend_disabled = udev->autosuspend_disabled;
354 old_autoresume_disabled = udev->autoresume_disabled;
355 358
356 /* Setting the flags without calling usb_pm_lock is a subject to 359 /* Setting the flags without calling usb_pm_lock is a subject to
357 * races, but who cares... 360 * races, but who cares...
@@ -359,28 +362,18 @@ set_level(struct device *dev, struct device_attribute *attr,
359 if (len == sizeof on_string - 1 && 362 if (len == sizeof on_string - 1 &&
360 strncmp(buf, on_string, len) == 0) { 363 strncmp(buf, on_string, len) == 0) {
361 udev->autosuspend_disabled = 1; 364 udev->autosuspend_disabled = 1;
362 udev->autoresume_disabled = 0;
363 rc = usb_external_resume_device(udev, PMSG_USER_RESUME); 365 rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
364 366
365 } else if (len == sizeof auto_string - 1 && 367 } else if (len == sizeof auto_string - 1 &&
366 strncmp(buf, auto_string, len) == 0) { 368 strncmp(buf, auto_string, len) == 0) {
367 udev->autosuspend_disabled = 0; 369 udev->autosuspend_disabled = 0;
368 udev->autoresume_disabled = 0;
369 rc = usb_external_resume_device(udev, PMSG_USER_RESUME); 370 rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
370 371
371 } else if (len == sizeof suspend_string - 1 &&
372 strncmp(buf, suspend_string, len) == 0) {
373 udev->autosuspend_disabled = 0;
374 udev->autoresume_disabled = 1;
375 rc = usb_external_suspend_device(udev, PMSG_USER_SUSPEND);
376
377 } else 372 } else
378 rc = -EINVAL; 373 rc = -EINVAL;
379 374
380 if (rc) { 375 if (rc)
381 udev->autosuspend_disabled = old_autosuspend_disabled; 376 udev->autosuspend_disabled = old_autosuspend_disabled;
382 udev->autoresume_disabled = old_autoresume_disabled;
383 }
384 usb_unlock_device(udev); 377 usb_unlock_device(udev);
385 return (rc < 0 ? rc : count); 378 return (rc < 0 ? rc : count);
386} 379}
@@ -508,6 +501,28 @@ static ssize_t usb_dev_authorized_store(struct device *dev,
508static DEVICE_ATTR(authorized, 0644, 501static DEVICE_ATTR(authorized, 0644,
509 usb_dev_authorized_show, usb_dev_authorized_store); 502 usb_dev_authorized_show, usb_dev_authorized_store);
510 503
504/* "Safely remove a device" */
505static ssize_t usb_remove_store(struct device *dev,
506 struct device_attribute *attr,
507 const char *buf, size_t count)
508{
509 struct usb_device *udev = to_usb_device(dev);
510 int rc = 0;
511
512 usb_lock_device(udev);
513 if (udev->state != USB_STATE_NOTATTACHED) {
514
515 /* To avoid races, first unconfigure and then remove */
516 usb_set_configuration(udev, -1);
517 rc = usb_remove_device(udev);
518 }
519 if (rc == 0)
520 rc = count;
521 usb_unlock_device(udev);
522 return rc;
523}
524static DEVICE_ATTR(remove, 0200, NULL, usb_remove_store);
525
511 526
512static struct attribute *dev_attrs[] = { 527static struct attribute *dev_attrs[] = {
513 /* current configuration's attributes */ 528 /* current configuration's attributes */
@@ -516,8 +531,8 @@ static struct attribute *dev_attrs[] = {
516 &dev_attr_bConfigurationValue.attr, 531 &dev_attr_bConfigurationValue.attr,
517 &dev_attr_bmAttributes.attr, 532 &dev_attr_bmAttributes.attr,
518 &dev_attr_bMaxPower.attr, 533 &dev_attr_bMaxPower.attr,
519 &dev_attr_urbnum.attr,
520 /* device attributes */ 534 /* device attributes */
535 &dev_attr_urbnum.attr,
521 &dev_attr_idVendor.attr, 536 &dev_attr_idVendor.attr,
522 &dev_attr_idProduct.attr, 537 &dev_attr_idProduct.attr,
523 &dev_attr_bcdDevice.attr, 538 &dev_attr_bcdDevice.attr,
@@ -529,10 +544,12 @@ static struct attribute *dev_attrs[] = {
529 &dev_attr_speed.attr, 544 &dev_attr_speed.attr,
530 &dev_attr_busnum.attr, 545 &dev_attr_busnum.attr,
531 &dev_attr_devnum.attr, 546 &dev_attr_devnum.attr,
547 &dev_attr_devpath.attr,
532 &dev_attr_version.attr, 548 &dev_attr_version.attr,
533 &dev_attr_maxchild.attr, 549 &dev_attr_maxchild.attr,
534 &dev_attr_quirks.attr, 550 &dev_attr_quirks.attr,
535 &dev_attr_authorized.attr, 551 &dev_attr_authorized.attr,
552 &dev_attr_remove.attr,
536 NULL, 553 NULL,
537}; 554};
538static struct attribute_group dev_attr_grp = { 555static struct attribute_group dev_attr_grp = {
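
The new "remove" attribute gives user space a "safely remove" operation: writing to it unconfigures the device and then disables its upstream hub port via usb_remove_device(). A user-space sketch; the device path is an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* "1-1" is illustrative; substitute the device to eject. */
		int fd = open("/sys/bus/usb/devices/1-1/remove", O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, "1", 1) < 0)
			perror("write");
		close(fd);
		return 0;
	}
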
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 0885d4abdc62..e7cae1334693 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -429,8 +429,16 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
429 case USB_ENDPOINT_XFER_ISOC: 429 case USB_ENDPOINT_XFER_ISOC:
430 case USB_ENDPOINT_XFER_INT: 430 case USB_ENDPOINT_XFER_INT:
431 /* too small? */ 431 /* too small? */
432 if (urb->interval <= 0) 432 switch (dev->speed) {
433 return -EINVAL; 433 case USB_SPEED_VARIABLE:
434 if (urb->interval < 6)
435 return -EINVAL;
436 break;
437 default:
438 if (urb->interval <= 0)
439 return -EINVAL;
440 break;
441 }
434 /* too big? */ 442 /* too big? */
435 switch (dev->speed) { 443 switch (dev->speed) {
436 case USB_SPEED_SUPER: /* units are 125us */ 444 case USB_SPEED_SUPER: /* units are 125us */
@@ -438,6 +446,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
438 if (urb->interval > (1 << 15)) 446 if (urb->interval > (1 << 15))
439 return -EINVAL; 447 return -EINVAL;
440 max = 1 << 15; 448 max = 1 << 15;
 449 break;
449 case USB_SPEED_VARIABLE:
450 if (urb->interval > 16)
451 return -EINVAL;
452 break;
441 case USB_SPEED_HIGH: /* units are microframes */ 453 case USB_SPEED_HIGH: /* units are microframes */
442 /* NOTE usb handles 2^15 */ 454 /* NOTE usb handles 2^15 */
443 if (urb->interval > (1024 * 8)) 455 if (urb->interval > (1024 * 8))
@@ -461,8 +473,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
461 default: 473 default:
462 return -EINVAL; 474 return -EINVAL;
463 } 475 }
464 /* Round down to a power of 2, no more than max */ 476 if (dev->speed != USB_SPEED_VARIABLE) {
465 urb->interval = min(max, 1 << ilog2(urb->interval)); 477 /* Round down to a power of 2, no more than max */
478 urb->interval = min(max, 1 << ilog2(urb->interval));
479 }
466 } 480 }
467 481
468 return usb_hcd_submit_urb(urb, mem_flags); 482 return usb_hcd_submit_urb(urb, mem_flags);
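
Under the new checks, wireless USB ("variable" speed) intervals are exponents in the range 6..16 and are exempt from the power-of-two rounding applied to other speeds. A sketch of how a driver might derive urb->interval from an endpoint descriptor; the helper name is illustrative:

	#include <linux/usb.h>

	static unsigned int example_urb_interval(struct usb_device *udev,
			const struct usb_endpoint_descriptor *desc)
	{
		switch (udev->speed) {
		case USB_SPEED_HIGH:
		case USB_SPEED_SUPER:
			/* bInterval is an exponent: 2^(bInterval-1)
			 * (micro)frame units. */
			return 1 << (desc->bInterval - 1);
		case USB_SPEED_VARIABLE:
			/* Wireless USB submits the raw exponent. */
			return desc->bInterval;
		default:
			/* Full/low speed: bInterval counts frames. */
			return desc->bInterval;
		}
	}
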
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index b1b85abb9a2d..4e2c6df8d3cc 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -64,6 +64,43 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
64 64
65 65
66/** 66/**
67 * usb_find_alt_setting() - Given a configuration, find the alternate setting
68 * for the given interface.
 69 * @config: the configuration to search (not necessarily the current config).
 70 * @iface_num: interface number to search in
 71 * @alt_num: alternate interface setting number to search for.
72 *
73 * Search the configuration's interface cache for the given alt setting.
74 */
75struct usb_host_interface *usb_find_alt_setting(
76 struct usb_host_config *config,
77 unsigned int iface_num,
78 unsigned int alt_num)
79{
80 struct usb_interface_cache *intf_cache = NULL;
81 int i;
82
83 for (i = 0; i < config->desc.bNumInterfaces; i++) {
84 if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
85 == iface_num) {
86 intf_cache = config->intf_cache[i];
87 break;
88 }
89 }
90 if (!intf_cache)
91 return NULL;
92 for (i = 0; i < intf_cache->num_altsetting; i++)
93 if (intf_cache->altsetting[i].desc.bAlternateSetting == alt_num)
94 return &intf_cache->altsetting[i];
95
96 printk(KERN_DEBUG "Did not find alt setting %u for intf %u, "
97 "config %u\n", alt_num, iface_num,
98 config->desc.bConfigurationValue);
99 return NULL;
100}
101EXPORT_SYMBOL_GPL(usb_find_alt_setting);
102
103/**
67 * usb_ifnum_to_if - get the interface object with a given interface number 104 * usb_ifnum_to_if - get the interface object with a given interface number
68 * @dev: the device whose current configuration is considered 105 * @dev: the device whose current configuration is considered
69 * @ifnum: the desired interface 106 * @ifnum: the desired interface
@@ -130,24 +167,17 @@ struct usb_host_interface *usb_altnum_to_altsetting(
130} 167}
131EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting); 168EXPORT_SYMBOL_GPL(usb_altnum_to_altsetting);
132 169
133struct find_interface_arg {
134 int minor;
135 struct usb_interface *interface;
136};
137
138static int __find_interface(struct device *dev, void *data) 170static int __find_interface(struct device *dev, void *data)
139{ 171{
140 struct find_interface_arg *arg = data; 172 int *minor = data;
141 struct usb_interface *intf; 173 struct usb_interface *intf;
142 174
143 if (!is_usb_interface(dev)) 175 if (!is_usb_interface(dev))
144 return 0; 176 return 0;
145 177
146 intf = to_usb_interface(dev); 178 intf = to_usb_interface(dev);
147 if (intf->minor != -1 && intf->minor == arg->minor) { 179 if (intf->minor != -1 && intf->minor == *minor)
148 arg->interface = intf;
149 return 1; 180 return 1;
150 }
151 return 0; 181 return 0;
152} 182}
153 183
@@ -156,21 +186,20 @@ static int __find_interface(struct device *dev, void *data)
156 * @drv: the driver whose current configuration is considered 186 * @drv: the driver whose current configuration is considered
157 * @minor: the minor number of the desired device 187 * @minor: the minor number of the desired device
158 * 188 *
159 * This walks the driver device list and returns a pointer to the interface 189 * This walks the bus device list and returns a pointer to the interface
160 * with the matching minor. Note, this only works for devices that share the 190 * with the matching minor. Note, this only works for devices that share the
161 * USB major number. 191 * USB major number.
162 */ 192 */
163struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor) 193struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
164{ 194{
165 struct find_interface_arg argb; 195 struct device *dev;
166 int retval; 196
197 dev = bus_find_device(&usb_bus_type, NULL, &minor, __find_interface);
198
199 /* Drop reference count from bus_find_device */
200 put_device(dev);
167 201
168 argb.minor = minor; 202 return dev ? to_usb_interface(dev) : NULL;
169 argb.interface = NULL;
170 /* eat the error, it will be in argb.interface */
171 retval = driver_for_each_device(&drv->drvwrap.driver, NULL, &argb,
172 __find_interface);
173 return argb.interface;
174} 203}
175EXPORT_SYMBOL_GPL(usb_find_interface); 204EXPORT_SYMBOL_GPL(usb_find_interface);
176 205
@@ -1038,7 +1067,7 @@ static struct notifier_block usb_bus_nb = {
1038 1067 struct dentry *usb_debug_root;
1039 1068 EXPORT_SYMBOL_GPL(usb_debug_root);
1040 1069
1041struct dentry *usb_debug_devices;
1070static struct dentry *usb_debug_devices;
1042 1071
1043 1072 static int usb_debugfs_init(void)
1044 1073 {
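For reference, a minimal caller sketch of the usb_find_alt_setting() helper added above (not part of the patch; "udev" and the interface/altsetting numbers are invented for illustration):

	static int check_first_alt(struct usb_device *udev)
	{
		struct usb_host_interface *alt;

		/* interface 0, alternate setting 1, in the active config */
		alt = usb_find_alt_setting(udev->actconfig, 0, 1);
		if (!alt)
			return -ENODEV;
		return alt->desc.bNumEndpoints;
	}

The usb_find_interface() rework above follows the same driver-core pattern: bus_find_device() returns the first device for which __find_interface() reports a match, and the reference it takes is dropped right away since the caller only receives the usb_interface pointer.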
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 9a8b15e6377a..4c36c7f512a0 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -24,6 +24,7 @@ extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
24 24 extern int usb_deauthorize_device(struct usb_device *);
25 25 extern int usb_authorize_device(struct usb_device *);
26 26 extern void usb_detect_quirks(struct usb_device *udev);
27extern int usb_remove_device(struct usb_device *udev);
27 28
28 29 extern int usb_get_device_descriptor(struct usb_device *dev,
29 30 		unsigned int size);
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index a18e3c5dd82e..ee411206c699 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -732,6 +732,24 @@ config USB_FILE_STORAGE_TEST
732 732 	  behavior of USB Mass Storage hosts. Not needed for
733 733 	  normal operation.
734 734
735config USB_MASS_STORAGE
736 tristate "Mass Storage Gadget"
737 depends on BLOCK
738 help
739 The Mass Storage Gadget acts as a USB Mass Storage disk drive.
740 As its storage repository it can use a regular file or a block
741 device (in much the same way as the "loop" device driver),
742 specified as a module parameter or sysfs option.
743
744 This is heavily based on File-backed Storage Gadget and in most
745 cases you will want to use FSG instead. This gadget is mostly
746 here to test the functionality of the Mass Storage Function
747 which may be used with composite framework.
748
749 Say "y" to link the driver statically, or "m" to build
750	  a dynamically linked module called "g_mass_storage". If unsure,
751 consider File-backed Storage Gadget.
752
735 753 config USB_G_SERIAL
736 754 	tristate "Serial Gadget (with CDC ACM and CDC OBEX support)"
737 755 	help
@@ -794,6 +812,48 @@ config USB_CDC_COMPOSITE
794 812 	  Say "y" to link the driver statically, or "m" to build a
795 813 	  dynamically linked module.
796 814
815config USB_G_MULTI
816 tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
817 depends on BLOCK && NET
818 help
819 The Multifunction Composite Gadget provides Ethernet (RNDIS
820 and/or CDC Ethernet), mass storage and ACM serial link
821 interfaces.
822
823 You will be asked to choose which of the two configurations is
824 to be available in the gadget. At least one configuration must
825 be chosen to make the gadget usable. Selecting more than one
826 configuration will prevent Windows from automatically detecting
827 the gadget as a composite gadget, so an INF file will be needed to
828 use the gadget.
829
830 Say "y" to link the driver statically, or "m" to build a
831 dynamically linked module called "g_multi".
832
833config USB_G_MULTI_RNDIS
834 bool "RNDIS + CDC Serial + Storage configuration"
835 depends on USB_G_MULTI
836 default y
837 help
838 This option enables a configuration with RNDIS, CDC Serial and
839 Mass Storage functions available in the Multifunction Composite
840 Gadget. This is the configuration dedicated for Windows since RNDIS
841 is Microsoft's protocol.
842
843 If unsure, say "y".
844
845config USB_G_MULTI_CDC
846 bool "CDC Ethernet + CDC Serial + Storage configuration"
847 depends on USB_G_MULTI
848 default n
849 help
850 This option enables a configuration with CDC Ethernet (ECM), CDC
851 Serial and Mass Storage functions available in the Multifunction
852 Composite Gadget.
853
854	  If unsure, say "n".
855
856
797 857 # put drivers that need isochronous transfer support (for audio
798 858 # or video class gadget drivers), or specific hardware, here.
799 859
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 9d7b87c52e9f..2e2c047262b7 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -39,16 +39,20 @@ g_serial-objs := serial.o
39 39 g_midi-objs := gmidi.o
40 40 gadgetfs-objs := inode.o
41 41 g_file_storage-objs := file_storage.o
42g_mass_storage-objs := mass_storage.o
42 43 g_printer-objs := printer.o
43 44 g_cdc-objs := cdc2.o
45g_multi-objs := multi.o
44 46
45 47 obj-$(CONFIG_USB_ZERO) += g_zero.o
46 48 obj-$(CONFIG_USB_AUDIO) += g_audio.o
47 49 obj-$(CONFIG_USB_ETH) += g_ether.o
48 50 obj-$(CONFIG_USB_GADGETFS) += gadgetfs.o
49 51 obj-$(CONFIG_USB_FILE_STORAGE) += g_file_storage.o
52obj-$(CONFIG_USB_MASS_STORAGE) += g_mass_storage.o
50 53 obj-$(CONFIG_USB_G_SERIAL) += g_serial.o
51 54 obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
52 55 obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
53 56 obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
57obj-$(CONFIG_USB_G_MULTI) += g_multi.o
54 58
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 66450a1abc22..043e04db2a05 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -892,7 +892,7 @@ static void pullup(struct at91_udc *udc, int is_on)
892 892
893 893 		txvc |= AT91_UDP_TXVC_PUON;
894 894 		at91_udp_write(udc, AT91_UDP_TXVC, txvc);
895	} else if (cpu_is_at91sam9261()) {
895	} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
896 896 		u32 usbpucr;
897 897
898 898 		usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
@@ -910,7 +910,7 @@ static void pullup(struct at91_udc *udc, int is_on)
910 910
911 911 		txvc &= ~AT91_UDP_TXVC_PUON;
912 912 		at91_udp_write(udc, AT91_UDP_TXVC, txvc);
913	} else if (cpu_is_at91sam9261()) {
913	} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
914 914 		u32 usbpucr;
915 915
916 916 		usbpucr = at91_sys_read(AT91_MATRIX_USBPUCR);
@@ -1692,7 +1692,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
1692 1692 		udc->ep[3].maxpacket = 64;
1693 1693 		udc->ep[4].maxpacket = 512;
1694 1694 		udc->ep[5].maxpacket = 512;
1695	} else if (cpu_is_at91sam9261()) {
1695	} else if (cpu_is_at91sam9261() || cpu_is_at91sam9g10()) {
1696 1696 		udc->ep[3].maxpacket = 64;
1697 1697 	} else if (cpu_is_at91sam9263()) {
1698 1698 		udc->ep[0].maxpacket = 64;
diff --git a/drivers/usb/gadget/audio.c b/drivers/usb/gadget/audio.c
index a3a0f4a27ef0..58f220323847 100644
--- a/drivers/usb/gadget/audio.c
+++ b/drivers/usb/gadget/audio.c
@@ -89,120 +89,6 @@ static const struct usb_descriptor_header *otg_desc[] = {
89 89
90 90 /*-------------------------------------------------------------------------*/
91 91
92/**
93 * Handle USB audio endpoint set/get command in setup class request
94 */
95
96static int audio_set_endpoint_req(struct usb_configuration *c,
97 const struct usb_ctrlrequest *ctrl)
98{
99 struct usb_composite_dev *cdev = c->cdev;
100 int value = -EOPNOTSUPP;
101 u16 ep = le16_to_cpu(ctrl->wIndex);
102 u16 len = le16_to_cpu(ctrl->wLength);
103 u16 w_value = le16_to_cpu(ctrl->wValue);
104
105 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
106 ctrl->bRequest, w_value, len, ep);
107
108 switch (ctrl->bRequest) {
109 case UAC_SET_CUR:
110 value = 0;
111 break;
112
113 case UAC_SET_MIN:
114 break;
115
116 case UAC_SET_MAX:
117 break;
118
119 case UAC_SET_RES:
120 break;
121
122 case UAC_SET_MEM:
123 break;
124
125 default:
126 break;
127 }
128
129 return value;
130}
131
132static int audio_get_endpoint_req(struct usb_configuration *c,
133 const struct usb_ctrlrequest *ctrl)
134{
135 struct usb_composite_dev *cdev = c->cdev;
136 int value = -EOPNOTSUPP;
137 u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
138 u16 len = le16_to_cpu(ctrl->wLength);
139 u16 w_value = le16_to_cpu(ctrl->wValue);
140
141 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
142 ctrl->bRequest, w_value, len, ep);
143
144 switch (ctrl->bRequest) {
145 case UAC_GET_CUR:
146 case UAC_GET_MIN:
147 case UAC_GET_MAX:
148 case UAC_GET_RES:
149 value = 3;
150 break;
151 case UAC_GET_MEM:
152 break;
153 default:
154 break;
155 }
156
157 return value;
158}
159
160static int
161audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
162{
163 struct usb_composite_dev *cdev = c->cdev;
164 struct usb_request *req = cdev->req;
165 int value = -EOPNOTSUPP;
166 u16 w_index = le16_to_cpu(ctrl->wIndex);
167 u16 w_value = le16_to_cpu(ctrl->wValue);
168 u16 w_length = le16_to_cpu(ctrl->wLength);
169
170 /* composite driver infrastructure handles everything except
171 * Audio class messages; interface activation uses set_alt().
172 */
173 switch (ctrl->bRequestType) {
174 case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
175 value = audio_set_endpoint_req(c, ctrl);
176 break;
177
178 case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
179 value = audio_get_endpoint_req(c, ctrl);
180 break;
181
182 default:
183 ERROR(cdev, "Invalid control req%02x.%02x v%04x i%04x l%d\n",
184 ctrl->bRequestType, ctrl->bRequest,
185 w_value, w_index, w_length);
186 }
187
188 /* respond with data transfer or status phase? */
189 if (value >= 0) {
190 DBG(cdev, "Audio req%02x.%02x v%04x i%04x l%d\n",
191 ctrl->bRequestType, ctrl->bRequest,
192 w_value, w_index, w_length);
193 req->zero = 0;
194 req->length = value;
195 value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
196 if (value < 0)
197 ERROR(cdev, "Audio response on err %d\n", value);
198 }
199
200 /* device either stalls (value < 0) or reports success */
201 return value;
202}
203
204/*-------------------------------------------------------------------------*/
205
206 92 static int __init audio_do_config(struct usb_configuration *c)
207 93 {
208 94 	/* FIXME alloc iConfiguration string, set it in c->strings */
@@ -220,7 +106,6 @@ static int __init audio_do_config(struct usb_configuration *c)
220 106 static struct usb_configuration audio_config_driver = {
221 107 	.label			= DRIVER_DESC,
222 108 	.bind			= audio_do_config,
223 .setup = audio_setup,
224 109 	.bConfigurationValue	= 1,
225 110 	/* .iConfiguration = DYNAMIC */
226 111 	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d05397ec8a18..09289bb1e20f 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -373,6 +373,8 @@ static void reset_config(struct usb_composite_dev *cdev)
373 373 	list_for_each_entry(f, &cdev->config->functions, list) {
374 374 		if (f->disable)
375 375 			f->disable(f);
376
377 bitmap_zero(f->endpoints, 32);
376 378 	}
377 379 	cdev->config = NULL;
378 380 }
@@ -418,10 +420,35 @@ static int set_config(struct usb_composite_dev *cdev,
418 420 	/* Initialize all interfaces by setting them to altsetting zero. */
419 421 	for (tmp = 0; tmp < MAX_CONFIG_INTERFACES; tmp++) {
420 422 		struct usb_function *f = c->interface[tmp];
423 struct usb_descriptor_header **descriptors;
421 424
422 425 		if (!f)
423 426 			break;
424 427
428 /*
429 * Record which endpoints are used by the function. This is used
430 * to dispatch control requests targeted at that endpoint to the
431 * function's setup callback instead of the current
432 * configuration's setup callback.
433 */
434 if (gadget->speed == USB_SPEED_HIGH)
435 descriptors = f->hs_descriptors;
436 else
437 descriptors = f->descriptors;
438
439 for (; *descriptors; ++descriptors) {
440 struct usb_endpoint_descriptor *ep;
441 int addr;
442
443 if ((*descriptors)->bDescriptorType != USB_DT_ENDPOINT)
444 continue;
445
446 ep = (struct usb_endpoint_descriptor *)*descriptors;
447 addr = ((ep->bEndpointAddress & 0x80) >> 3)
448 | (ep->bEndpointAddress & 0x0f);
449 set_bit(addr, f->endpoints);
450 }
451
425 452 		result = f->set_alt(f, tmp, 0);
426 453 		if (result < 0) {
427 454 			DBG(cdev, "interface %d (%s/%p) alt 0 --> %d\n",
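The address packing above folds the direction bit (0x80) down beside the 4-bit endpoint number, so the 32-bit "endpoints" bitmap uses bits 0-15 for OUT endpoints and bits 16-31 for IN endpoints. A sketch of the same computation with two worked values (illustration only, not part of the patch):

	/* 0x81 (EP 1 IN):  ((0x81 & 0x80) >> 3) | (0x81 & 0x0f) = 16 | 1 = 17
	 * 0x02 (EP 2 OUT): ((0x02 & 0x80) >> 3) | (0x02 & 0x0f) =  0 | 2 =  2
	 */
	static inline int ep_addr_to_bit(u8 bEndpointAddress)
	{
		return ((bEndpointAddress & 0x80) >> 3)
			| (bEndpointAddress & 0x0f);
	}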
@@ -688,6 +715,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
688 715 	u16				w_value = le16_to_cpu(ctrl->wValue);
689 716 	u16				w_length = le16_to_cpu(ctrl->wLength);
690 717 	struct usb_function		*f = NULL;
718 u8 endp;
691 719
692 720 	/* partial re-init of the response message; the function or the
693 721 	 * gadget might need to intercept e.g. a control-OUT completion
@@ -800,23 +828,33 @@ unknown:
800 828 			ctrl->bRequestType, ctrl->bRequest,
801 829 			w_value, w_index, w_length);
802 830
803	/* functions always handle their interfaces ... punt other
804	 * recipients (endpoint, other, WUSB, ...) to the current
831	/* functions always handle their interfaces and endpoints...
832	 * punt other recipients (other, WUSB, ...) to the current
805 833 	 * configuration code.
806 834 	 *
807 835 	 * REVISIT it could make sense to let the composite device
808 836 	 * take such requests too, if that's ever needed: to work
809 837 	 * in config 0, etc.
810 838 	 */
811	if ((ctrl->bRequestType & USB_RECIP_MASK)
812			== USB_RECIP_INTERFACE) {
839	switch (ctrl->bRequestType & USB_RECIP_MASK) {
840	case USB_RECIP_INTERFACE:
813 841 		f = cdev->config->interface[intf];
814		if (f && f->setup)
815			value = f->setup(f, ctrl);
816		else
842		break;
843
844	case USB_RECIP_ENDPOINT:
845 endp = ((w_index & 0x80) >> 3) | (w_index & 0x0f);
846 list_for_each_entry(f, &cdev->config->functions, list) {
847 if (test_bit(endp, f->endpoints))
848 break;
849 }
850 if (&f->list == &cdev->config->functions)
817 851 			f = NULL;
852 break;
818 853 	}
819	if (value < 0 && !f) {
854
855 if (f && f->setup)
856 value = f->setup(f, ctrl);
857 else {
820 858 		struct usb_configuration *c;
821 859
822 860 		c = cdev->config;
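With this dispatch in place, a class request addressed to an endpoint -- say bRequestType 0x22 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT) with wIndex naming an audio endpoint -- is matched against each function's endpoint bitmap and handed to that function's setup() callback; only when no function claims the recipient does the current configuration's setup() callback (if any) get a look, as the fallback below the switch shows.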
@@ -1054,7 +1092,8 @@ static struct usb_gadget_driver composite_driver = {
1054 1092 	.speed		= USB_SPEED_HIGH,
1055 1093
1056 1094 	.bind		= composite_bind,
1057	.unbind		= __exit_p(composite_unbind),
1095	/* .unbind		= __exit_p(composite_unbind), */
1096 .unbind = composite_unbind,
1058 1097
1059 1098 	.setup		= composite_setup,
1060 1099 	.disconnect	= composite_disconnect,
@@ -1103,7 +1142,7 @@ int __init usb_composite_register(struct usb_composite_driver *driver)
1103 1142  * This function is used to unregister drivers using the composite
1104 1143  * driver framework.
1105 1144  */
1106void __exit usb_composite_unregister(struct usb_composite_driver *driver)
1145void /* __exit */ usb_composite_unregister(struct usb_composite_driver *driver)
1107 1146 {
1108 1147 	if (composite != driver)
1109 1148 		return;
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 167cb2a8ecef..141372b6e7a1 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -25,6 +25,14 @@
25 25 #include <linux/kernel.h>
26 26 #include <linux/utsname.h>
27 27
28
29#if defined USB_ETH_RNDIS
30# undef USB_ETH_RNDIS
31#endif
32#ifdef CONFIG_USB_ETH_RNDIS
33# define USB_ETH_RNDIS y
34#endif
35
28 36 #include "u_ether.h"
29 37
30 38
@@ -66,7 +74,7 @@
66 74 #define DRIVER_DESC		"Ethernet Gadget"
67 75 #define DRIVER_VERSION		"Memorial Day 2008"
68 76
69#ifdef CONFIG_USB_ETH_RNDIS
77#ifdef USB_ETH_RNDIS
70 78 #define PREFIX			"RNDIS/"
71 79 #else
72 80 #define PREFIX			""
@@ -87,7 +95,7 @@
87 95
88 96 static inline bool has_rndis(void)
89 97 {
90#ifdef CONFIG_USB_ETH_RNDIS
98#ifdef USB_ETH_RNDIS
91 99 	return true;
92 100 #else
93 101 	return false;
@@ -110,7 +118,7 @@ static inline bool has_rndis(void)
110 118
111 119 #include "f_ecm.c"
112 120 #include "f_subset.c"
113#ifdef CONFIG_USB_ETH_RNDIS
121#ifdef USB_ETH_RNDIS
114 122 #include "f_rndis.c"
115 123 #include "rndis.c"
116 124 #endif
@@ -251,7 +259,7 @@ static struct usb_configuration rndis_config_driver = {
251 259
252 260 /*-------------------------------------------------------------------------*/
253 261
254#ifdef CONFIG_USB_ETH_EEM
262#ifdef USB_ETH_EEM
255 263 static int use_eem = 1;
256 264 #else
257 265 static int use_eem;
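The #undef/#define indirection added at the top of ether.c means the shared sources now test an internal USB_ETH_RNDIS symbol rather than the Kconfig option directly; ether.c derives it from CONFIG_USB_ETH_RNDIS, while another gadget that includes the same function files can set it on its own terms. A sketch of the pattern (assumed usage; the multi gadget presumably keys it off its own Kconfig option):

	/* a gadget that wants RNDIS regardless of CONFIG_USB_ETH_RNDIS
	 * defines the internal symbol before pulling in the shared code */
	#define USB_ETH_RNDIS y
	#include "f_rndis.c"
	#include "rndis.c"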
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 4e3657808b0f..d10353d46b86 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -4,6 +4,8 @@
4 4  * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
5 5  * Copyright (C) 2008 by David Brownell
6 6  * Copyright (C) 2008 by Nokia Corporation
7 * Copyright (C) 2009 by Samsung Electronics
8 * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
7 9  *
8 10  * This software is distributed under the terms of the GNU General
9 11  * Public License ("GPL") as published by the Free Software Foundation,
@@ -99,6 +101,20 @@ static inline struct f_acm *port_to_acm(struct gserial *p)
99 101
100 102 /* interface and class descriptors: */
101 103
104static struct usb_interface_assoc_descriptor
105acm_iad_descriptor = {
106 .bLength = sizeof acm_iad_descriptor,
107 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
108
109 /* .bFirstInterface = DYNAMIC, */
110 .bInterfaceCount = 2, // control + data
111 .bFunctionClass = USB_CLASS_COMM,
112 .bFunctionSubClass = USB_CDC_SUBCLASS_ACM,
113 .bFunctionProtocol = USB_CDC_PROTO_NONE,
114 /* .iFunction = DYNAMIC */
115};
116
117
102 118 static struct usb_interface_descriptor acm_control_interface_desc __initdata = {
103 119 	.bLength =		USB_DT_INTERFACE_SIZE,
104 120 	.bDescriptorType =	USB_DT_INTERFACE,
@@ -178,6 +194,7 @@ static struct usb_endpoint_descriptor acm_fs_out_desc __initdata = {
178 194 };
179 195
180 196 static struct usb_descriptor_header *acm_fs_function[] __initdata = {
197 (struct usb_descriptor_header *) &acm_iad_descriptor,
181 198 	(struct usb_descriptor_header *) &acm_control_interface_desc,
182 199 	(struct usb_descriptor_header *) &acm_header_desc,
183 200 	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
@@ -216,6 +233,7 @@ static struct usb_endpoint_descriptor acm_hs_out_desc __initdata = {
216 233 };
217 234
218 235 static struct usb_descriptor_header *acm_hs_function[] __initdata = {
236 (struct usb_descriptor_header *) &acm_iad_descriptor,
219 237 	(struct usb_descriptor_header *) &acm_control_interface_desc,
220 238 	(struct usb_descriptor_header *) &acm_header_desc,
221 239 	(struct usb_descriptor_header *) &acm_call_mgmt_descriptor,
@@ -232,11 +250,13 @@ static struct usb_descriptor_header *acm_hs_function[] __initdata = {
232 250
233 251 #define ACM_CTRL_IDX	0
234 252 #define ACM_DATA_IDX	1
253#define ACM_IAD_IDX 2
235 254
236 255 /* static strings, in UTF-8 */
237 256 static struct usb_string acm_string_defs[] = {
238 257 	[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
239 258 	[ACM_DATA_IDX].s = "CDC ACM Data",
259 [ACM_IAD_IDX ].s = "CDC Serial",
240 260 	{  /* ZEROES END LIST */ },
241 261 };
242 262
@@ -563,6 +583,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
563 583 	if (status < 0)
564 584 		goto fail;
565 585 	acm->ctrl_id = status;
586 acm_iad_descriptor.bFirstInterface = status;
566 587
567 588 	acm_control_interface_desc.bInterfaceNumber = status;
568 589 	acm_union_desc .bMasterInterface0 = status;
@@ -732,6 +753,13 @@ int __init acm_bind_config(struct usb_configuration *c, u8 port_num)
732 753 		acm_string_defs[ACM_DATA_IDX].id = status;
733 754
734 755 		acm_data_interface_desc.iInterface = status;
756
757 status = usb_string_id(c->cdev);
758 if (status < 0)
759 return status;
760 acm_string_defs[ACM_IAD_IDX].id = status;
761
762 acm_iad_descriptor.iFunction = status;
735 763 	}
736 764
737 765 	/* allocate and initialize one new instance */
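Taken together, these hunks put an Interface Association Descriptor in front of the ACM descriptors, so the host can tell that the two interfaces starting at bFirstInterface (bInterfaceCount = 2: control plus data) form a single "CDC Serial" function; bFirstInterface and iFunction are filled in at bind time, as the hunks above show.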
diff --git a/drivers/usb/gadget/f_audio.c b/drivers/usb/gadget/f_audio.c
index 98e9bb977291..c43c89ffa2c8 100644
--- a/drivers/usb/gadget/f_audio.c
+++ b/drivers/usb/gadget/f_audio.c
@@ -445,6 +445,70 @@ static int audio_get_intf_req(struct usb_function *f,
445 445 	return len;
446 446 }
447 447
448static int audio_set_endpoint_req(struct usb_function *f,
449 const struct usb_ctrlrequest *ctrl)
450{
451 struct usb_composite_dev *cdev = f->config->cdev;
452 int value = -EOPNOTSUPP;
453 u16 ep = le16_to_cpu(ctrl->wIndex);
454 u16 len = le16_to_cpu(ctrl->wLength);
455 u16 w_value = le16_to_cpu(ctrl->wValue);
456
457 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
458 ctrl->bRequest, w_value, len, ep);
459
460 switch (ctrl->bRequest) {
461 case UAC_SET_CUR:
462 value = 0;
463 break;
464
465 case UAC_SET_MIN:
466 break;
467
468 case UAC_SET_MAX:
469 break;
470
471 case UAC_SET_RES:
472 break;
473
474 case UAC_SET_MEM:
475 break;
476
477 default:
478 break;
479 }
480
481 return value;
482}
483
484static int audio_get_endpoint_req(struct usb_function *f,
485 const struct usb_ctrlrequest *ctrl)
486{
487 struct usb_composite_dev *cdev = f->config->cdev;
488 int value = -EOPNOTSUPP;
489 u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
490 u16 len = le16_to_cpu(ctrl->wLength);
491 u16 w_value = le16_to_cpu(ctrl->wValue);
492
493 DBG(cdev, "bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
494 ctrl->bRequest, w_value, len, ep);
495
496 switch (ctrl->bRequest) {
497 case UAC_GET_CUR:
498 case UAC_GET_MIN:
499 case UAC_GET_MAX:
500 case UAC_GET_RES:
501 value = 3;
502 break;
503 case UAC_GET_MEM:
504 break;
505 default:
506 break;
507 }
508
509 return value;
510}
511
448 512 static int
449 513 f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
450 514 {
@@ -455,8 +519,8 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
455 519 	u16			w_value = le16_to_cpu(ctrl->wValue);
456 520 	u16			w_length = le16_to_cpu(ctrl->wLength);
457 521
458	/* composite driver infrastructure handles everything except
459	 * Audio class messages; interface activation uses set_alt().
522	/* composite driver infrastructure handles everything; interface
523	 * activation uses set_alt().
460 524 	 */
461 525 	switch (ctrl->bRequestType) {
462 526 	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
@@ -467,6 +531,14 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
467 531 		value = audio_get_intf_req(f, ctrl);
468 532 		break;
469 533
534 case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
535 value = audio_set_endpoint_req(f, ctrl);
536 break;
537
538 case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
539 value = audio_get_endpoint_req(f, ctrl);
540 break;
541
470 542 	default:
471 543 		ERROR(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
472 544 			ctrl->bRequestType, ctrl->bRequest,
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
new file mode 100644
index 000000000000..a37640eba434
--- /dev/null
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -0,0 +1,3091 @@
1/*
2 * f_mass_storage.c -- Mass Storage USB Composite Function
3 *
4 * Copyright (C) 2003-2008 Alan Stern
5 * Copyright (C) 2009 Samsung Electronics
6 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. The names of the above-listed copyright holders may not be used
19 * to endorse or promote products derived from this software without
20 * specific prior written permission.
21 *
22 * ALTERNATIVELY, this software may be distributed under the terms of the
23 * GNU General Public License ("GPL") as published by the Free Software
24 * Foundation, either version 2 of that License or (at your option) any
25 * later version.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
28 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
29 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
30 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
31 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
32 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
33 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
34 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40
41/*
42 * The Mass Storage Function acts as a USB Mass Storage device,
43 * appearing to the host as a disk drive or as a CD-ROM drive. In
44 * addition to providing an example of a genuinely useful composite
45 * function for a USB device, it also illustrates a technique of
46 * double-buffering for increased throughput.
47 *
48 * The function supports multiple logical units (LUNs). Backing storage
49 * for each LUN is provided by a regular file or a block device.
50 * Access for each LUN can be limited to read-only. Moreover, the
51 * function can indicate that a LUN is removable and/or a CD-ROM.
52 * (The latter implies read-only access.)
53 *
54 * The MSF is configured by specifying an fsg_config structure. It has the
55 * following fields:
56 *
57 * nluns Number of LUNs the function has (anywhere from 1
58 * to FSG_MAX_LUNS, which is 8).
59 * luns An array of LUN configuration values. This
60 * should be filled for each LUN that the
61 * function will include (i.e. for "nluns"
62 * LUNs). Each element of the array has
63 * the following fields:
64 * ->filename The path to the backing file for the LUN.
65 * Required if LUN is not marked as
66 * removable.
67 * ->ro Flag specifying that access to the LUN shall be
68 * read-only. This is implied if CD-ROM
69 * emulation is enabled as well as when
70 * it was impossible to open "filename"
71 * in R/W mode.
72 * ->removable Flag specifying that the LUN shall be indicated as
73 * being removable.
74 * ->cdrom Flag specifying that the LUN shall be reported as
75 * being a CD-ROM.
76 *
77 * lun_name_format A printf-like format for names of the LUN
78 * devices. This determines how the
79 * directory in sysfs will be named.
80 * Unless you are using several MSFs in
81 * a single gadget (as opposed to a single
82 * MSF in many configurations) you may
83 * leave it as NULL (in which case
84 * "lun%d" will be used). In the format
85 * you can use "%d" to index LUNs for
86 * MSFs with more than one LUN. (Beware
87 * that there is only one integer given
88 * as an argument for the format and
89 * specifying an invalid format may cause
90 * unspecified behaviour.)
91 * thread_name Name of the kernel thread process used by the
92 * MSF. You can safely set it to NULL
93 * (in which case the default "file-storage"
94 * will be used).
95 *
96 * vendor_name
97 * product_name
98 * release Information used as a reply to INQUIRY
99 * request. To use the defaults set to NULL,
100 * NULL, 0xffff respectively. The first
101 * field should be 8 and the second 16
102 * characters or less.
103 *
104 * can_stall Set to permit the function to halt bulk endpoints.
105 * Disabled on some USB devices known not
106 * to work correctly. You should set it
107 * to true.
108 *
109 * If "removable" is not set for a LUN then a backing file must be
110 * specified. If it is set, then a NULL filename means the LUN's medium
111 * is not loaded (an empty string as "filename" in the fsg_config
112 * structure causes an error). The CD-ROM emulation includes a single
113 * data track and no audio tracks; hence there need be only one
114 * backing file per LUN. Note also that the CD-ROM block length is
115 * set to 512 rather than the more common value 2048.
116 *
117 *
118 * The MSF includes support for module parameters. If the gadget using
119 * it decides to do so, the following module parameters will be
120 * available:
121 *
122 * file=filename[,filename...]
123 * Names of the files or block devices used for
124 * backing storage.
125 * ro=b[,b...] Default false, boolean for read-only access.
126 * removable=b[,b...]
127 * Default true, boolean for removable media.
128 * cdrom=b[,b...] Default false, boolean for whether to emulate
129 * a CD-ROM drive.
130 * luns=N Default N = number of filenames, number of
131 * LUNs to support.
132 * stall Default determined according to the type of
133 * USB device controller (usually true),
134 * boolean to permit the driver to halt
135 * bulk endpoints.
136 *
137 * The module parameters may be prefixed with some string. You need
138 * to consult the gadget's documentation or source to verify whether it
139 * uses these module parameters and, if it does, what the prefixes are
140 * (look for FSG_MODULE_PARAMETERS() macro usage; what is inside it is
141 * the prefix).
142 *
143 *
144 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
145 * needed. The memory requirement amounts to two 16K buffers, size
146 * configurable by a parameter. Support is included for both
147 * full-speed and high-speed operation.
148 *
149 * Note that the driver is slightly non-portable in that it assumes a
150 * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
151 * interrupt-in endpoints. With most device controllers this isn't an
152 * issue, but there may be some with hardware restrictions that prevent
153 * a buffer from being used by more than one endpoint.
154 *
155 *
156 * The pathnames of the backing files and the ro settings are
157 * available in the attribute files "file" and "ro" in the lun<n> (or
158 * to be more precise, in a directory whose name comes from the
159 * "lun_name_format" option) subdirectory of the gadget's sysfs
160 * directory. If the "removable" option is set, writing to these
161 * files will simulate ejecting/loading the medium (writing an empty
162 * line means eject) and adjusting a write-enable tab. Changes to the
163 * ro setting are not allowed when the medium is loaded or if CD-ROM
164 * emulation is being used.
165 *
166 *
167 * This function is heavily based on "File-backed Storage Gadget" by
168 * Alan Stern which in turn is heavily based on "Gadget Zero" by David
169 * Brownell. The driver's SCSI command interface was based on the
170 * "Information technology - Small Computer System Interface - 2"
171 * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
172 * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
173 * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
174 * was based on the "Universal Serial Bus Mass Storage Class UFI
175 * Command Specification" document, Revision 1.0, December 14, 1998,
176 * available at
177 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
178 */
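As a concrete illustration of the configuration described above (a hedged sketch; the field names come from struct fsg_config further down in this file, and the backing-file path is invented):

	static struct fsg_config config = {
		.nluns		= 1,
		.luns[0]	= {
			.filename	= "/root/backing.img",
			.ro		= 0,
			.removable	= 1,
			.cdrom		= 0,
		},
		.lun_name_format = NULL,	/* default: "lun%d" */
		.thread_name	= NULL,		/* default: "file-storage" */
		.vendor_name	= NULL,		/* default INQUIRY data */
		.product_name	= NULL,
		.release	= 0xffff,	/* default */
		.can_stall	= 1,
	};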
179
180
181/*
182 * Driver Design
183 *
184 * The MSF is fairly straightforward. There is a main kernel
185 * thread that handles most of the work. Interrupt routines field
186 * callbacks from the controller driver: bulk- and interrupt-request
187 * completion notifications, endpoint-0 events, and disconnect events.
188 * Completion events are passed to the main thread by wakeup calls. Many
189 * ep0 requests are handled at interrupt time, but SetInterface,
190 * SetConfiguration, and device reset requests are forwarded to the
191 * thread in the form of "exceptions" using SIGUSR1 signals (since they
192 * should interrupt any ongoing file I/O operations).
193 *
194 * The thread's main routine implements the standard command/data/status
195 * parts of a SCSI interaction. It and its subroutines are full of tests
196 * for pending signals/exceptions -- all this polling is necessary since
197 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
198 * indication that the driver really wants to be running in userspace.)
199 * An important point is that so long as the thread is alive it keeps an
200 * open reference to the backing file. This will prevent unmounting
201 * the backing file's underlying filesystem and could cause problems
202 * during system shutdown, for example. To prevent such problems, the
203 * thread catches INT, TERM, and KILL signals and converts them into
204 * an EXIT exception.
205 *
206 * In normal operation the main thread is started during the gadget's
207 * fsg_bind() callback and stopped during fsg_unbind(). But it can
208 * also exit when it receives a signal, and there's no point leaving
209 * the gadget running when the thread is dead. As of this moment, the
210 * MSF provides no way to deregister the gadget when the thread dies --
211 * maybe a callback function is needed.
212 *
213 * To provide maximum throughput, the driver uses a circular pipeline of
214 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
215 * arbitrarily long; in practice the benefits don't justify having more
216 * than 2 stages (i.e., double buffering). But it helps to think of the
217 * pipeline as being a long one. Each buffer head contains a bulk-in and
218 * a bulk-out request pointer (since the buffer can be used for both
219 * output and input -- directions are always given from the host's
220 * point of view) as well as a pointer to the buffer and various state
221 * variables.
222 *
223 * Use of the pipeline follows a simple protocol. There is a variable
224 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
225 * At any time that buffer head may still be in use from an earlier
226 * request, so each buffer head has a state variable indicating whether
227 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
228 * buffer head to be EMPTY, filling the buffer either by file I/O or by
229 * USB I/O (during which the buffer head is BUSY), and marking the buffer
230 * head FULL when the I/O is complete. Then the buffer will be emptied
231 * (again possibly by USB I/O, during which it is marked BUSY) and
232 * finally marked EMPTY again (possibly by a completion routine).
233 *
234 * A module parameter tells the driver to avoid stalling the bulk
235 * endpoints wherever the transport specification allows. This is
236 * necessary for some UDCs like the SuperH, which cannot reliably clear a
237 * halt on a bulk endpoint. However, under certain circumstances the
238 * Bulk-only specification requires a stall. In such cases the driver
239 * will halt the endpoint and set a flag indicating that it should clear
240 * the halt in software during the next device reset. Hopefully this
241 * will permit everything to work correctly. Furthermore, although the
242 * specification allows the bulk-out endpoint to halt when the host sends
243 * too much data, implementing this would cause an unavoidable race.
244 * The driver will always use the "no-stall" approach for OUT transfers.
245 *
246 * One subtle point concerns sending status-stage responses for ep0
247 * requests. Some of these requests, such as device reset, can involve
248 * interrupting an ongoing file I/O operation, which might take an
249 * arbitrarily long time. During that delay the host might give up on
250 * the original ep0 request and issue a new one. When that happens the
251 * driver should not notify the host about completion of the original
252 * request, as the host will no longer be waiting for it. So the driver
253 * assigns to each ep0 request a unique tag, and it keeps track of the
254 * tag value of the request associated with a long-running exception
255 * (device-reset, interface-change, or configuration-change). When the
256 * exception handler is finished, the status-stage response is submitted
257 * only if the current ep0 request tag is equal to the exception request
258 * tag. Thus only the most recently received ep0 request will get a
259 * status-stage response.
260 *
261 * Warning: This driver source file is too long. It ought to be split up
262 * into a header file plus about 3 separate .c files, to handle the details
263 * of the Gadget, USB Mass Storage, and SCSI protocols.
264 */
265
266
267/* #define VERBOSE_DEBUG */
268/* #define DUMP_MSGS */
269
270
271#include <linux/blkdev.h>
272#include <linux/completion.h>
273#include <linux/dcache.h>
274#include <linux/delay.h>
275#include <linux/device.h>
276#include <linux/fcntl.h>
277#include <linux/file.h>
278#include <linux/fs.h>
279#include <linux/kref.h>
280#include <linux/kthread.h>
281#include <linux/limits.h>
282#include <linux/rwsem.h>
283#include <linux/slab.h>
284#include <linux/spinlock.h>
285#include <linux/string.h>
286#include <linux/freezer.h>
287#include <linux/utsname.h>
288
289#include <linux/usb/ch9.h>
290#include <linux/usb/gadget.h>
291
292#include "gadget_chips.h"
293
294
295
296/*------------------------------------------------------------------------*/
297
298#define FSG_DRIVER_DESC "Mass Storage Function"
299#define FSG_DRIVER_VERSION "2009/09/11"
300
301static const char fsg_string_interface[] = "Mass Storage";
302
303
304#define FSG_NO_INTR_EP 1
305#define FSG_BUFFHD_STATIC_BUFFER 1
306#define FSG_NO_DEVICE_STRINGS 1
307#define FSG_NO_OTG 1
308#define FSG_NO_INTR_EP 1
309
310#include "storage_common.c"
311
312
313/*-------------------------------------------------------------------------*/
314
315struct fsg_dev;
316
317
318/* Data shared by all the FSG instances. */
319struct fsg_common {
320 struct usb_gadget *gadget;
321 struct fsg_dev *fsg;
322 struct fsg_dev *prev_fsg;
323
324 /* filesem protects: backing files in use */
325 struct rw_semaphore filesem;
326
327 /* lock protects: state, all the req_busy's */
328 spinlock_t lock;
329
330 struct usb_ep *ep0; /* Copy of gadget->ep0 */
331 struct usb_request *ep0req; /* Copy of cdev->req */
332 unsigned int ep0_req_tag;
333 const char *ep0req_name;
334
335 struct fsg_buffhd *next_buffhd_to_fill;
336 struct fsg_buffhd *next_buffhd_to_drain;
337 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
338
339 int cmnd_size;
340 u8 cmnd[MAX_COMMAND_SIZE];
341
342 unsigned int nluns;
343 unsigned int lun;
344 struct fsg_lun *luns;
345 struct fsg_lun *curlun;
346
347 unsigned int bulk_out_maxpacket;
348 enum fsg_state state; /* For exception handling */
349 unsigned int exception_req_tag;
350
351 u8 config, new_config;
352 enum data_direction data_dir;
353 u32 data_size;
354 u32 data_size_from_cmnd;
355 u32 tag;
356 u32 residue;
357 u32 usb_amount_left;
358
359 unsigned int can_stall:1;
360 unsigned int free_storage_on_release:1;
361 unsigned int phase_error:1;
362 unsigned int short_packet_received:1;
363 unsigned int bad_lun_okay:1;
364 unsigned int running:1;
365
366 int thread_wakeup_needed;
367 struct completion thread_notifier;
368 struct task_struct *thread_task;
369
370 /* Callback function to call when thread exits. */
371 void (*thread_exits)(struct fsg_common *common);
372 /* Gadget's private data. */
373 void *private_data;
374
375 /* Vendor (8 chars), product (16 chars), release (4
376 * hexadecimal digits) and NUL byte */
377 char inquiry_string[8 + 16 + 4 + 1];
378
379 struct kref ref;
380};
381
382
383struct fsg_config {
384 unsigned nluns;
385 struct fsg_lun_config {
386 const char *filename;
387 char ro;
388 char removable;
389 char cdrom;
390 } luns[FSG_MAX_LUNS];
391
392 const char *lun_name_format;
393 const char *thread_name;
394
395 /* Callback function to call when thread exits. */
396 void (*thread_exits)(struct fsg_common *common);
397 /* Gadget's private data. */
398 void *private_data;
399
400 const char *vendor_name; /* 8 characters or less */
401 const char *product_name; /* 16 characters or less */
402 u16 release;
403
404 char can_stall;
405};
406
407
408struct fsg_dev {
409 struct usb_function function;
410 struct usb_gadget *gadget; /* Copy of cdev->gadget */
411 struct fsg_common *common;
412
413 u16 interface_number;
414
415 unsigned int bulk_in_enabled:1;
416 unsigned int bulk_out_enabled:1;
417
418 unsigned long atomic_bitflags;
419#define IGNORE_BULK_OUT 0
420
421 struct usb_ep *bulk_in;
422 struct usb_ep *bulk_out;
423};
424
425
426static inline int __fsg_is_set(struct fsg_common *common,
427 const char *func, unsigned line)
428{
429 if (common->fsg)
430 return 1;
431 ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
432 return 0;
433}
434
435#define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
436
437
438static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
439{
440 return container_of(f, struct fsg_dev, function);
441}
442
443
444typedef void (*fsg_routine_t)(struct fsg_dev *);
445
446static int exception_in_progress(struct fsg_common *common)
447{
448 return common->state > FSG_STATE_IDLE;
449}
450
451/* Make bulk-out requests be divisible by the maxpacket size */
452static void set_bulk_out_req_length(struct fsg_common *common,
453 struct fsg_buffhd *bh, unsigned int length)
454{
455 unsigned int rem;
456
457 bh->bulk_out_intended_length = length;
458 rem = length % common->bulk_out_maxpacket;
459 if (rem > 0)
460 length += common->bulk_out_maxpacket - rem;
461 bh->outreq->length = length;
462}
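A quick worked example of the rounding above: with bulk_out_maxpacket = 512 and a requested length of 600, rem = 88 and the queued request length becomes 600 + (512 - 88) = 1024, while bulk_out_intended_length keeps the original 600; the padding presumably lets the request accept whole max-packet-sized bursts from the host.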
463
464/*-------------------------------------------------------------------------*/
465
466static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
467{
468 const char *name;
469
470 if (ep == fsg->bulk_in)
471 name = "bulk-in";
472 else if (ep == fsg->bulk_out)
473 name = "bulk-out";
474 else
475 name = ep->name;
476 DBG(fsg, "%s set halt\n", name);
477 return usb_ep_set_halt(ep);
478}
479
480
481/*-------------------------------------------------------------------------*/
482
483/* These routines may be called in process context or in_irq */
484
485/* Caller must hold fsg->lock */
486static void wakeup_thread(struct fsg_common *common)
487{
488 /* Tell the main thread that something has happened */
489 common->thread_wakeup_needed = 1;
490 if (common->thread_task)
491 wake_up_process(common->thread_task);
492}
493
494
495static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
496{
497 unsigned long flags;
498
499 /* Do nothing if a higher-priority exception is already in progress.
500 * If a lower-or-equal priority exception is in progress, preempt it
501 * and notify the main thread by sending it a signal. */
502 spin_lock_irqsave(&common->lock, flags);
503 if (common->state <= new_state) {
504 common->exception_req_tag = common->ep0_req_tag;
505 common->state = new_state;
506 if (common->thread_task)
507 send_sig_info(SIGUSR1, SEND_SIG_FORCED,
508 common->thread_task);
509 }
510 spin_unlock_irqrestore(&common->lock, flags);
511}
512
513
514/*-------------------------------------------------------------------------*/
515
516static int ep0_queue(struct fsg_common *common)
517{
518 int rc;
519
520 rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
521 common->ep0->driver_data = common;
522 if (rc != 0 && rc != -ESHUTDOWN) {
523 /* We can't do much more than wait for a reset */
524 WARNING(common, "error in submission: %s --> %d\n",
525 common->ep0->name, rc);
526 }
527 return rc;
528}
529
530/*-------------------------------------------------------------------------*/
531
532/* Bulk and interrupt endpoint completion handlers.
533 * These always run in_irq. */
534
535static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
536{
537 struct fsg_common *common = ep->driver_data;
538 struct fsg_buffhd *bh = req->context;
539
540 if (req->status || req->actual != req->length)
541 DBG(common, "%s --> %d, %u/%u\n", __func__,
542 req->status, req->actual, req->length);
543 if (req->status == -ECONNRESET) /* Request was cancelled */
544 usb_ep_fifo_flush(ep);
545
546 /* Hold the lock while we update the request and buffer states */
547 smp_wmb();
548 spin_lock(&common->lock);
549 bh->inreq_busy = 0;
550 bh->state = BUF_STATE_EMPTY;
551 wakeup_thread(common);
552 spin_unlock(&common->lock);
553}
554
555static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
556{
557 struct fsg_common *common = ep->driver_data;
558 struct fsg_buffhd *bh = req->context;
559
560 dump_msg(common, "bulk-out", req->buf, req->actual);
561 if (req->status || req->actual != bh->bulk_out_intended_length)
562 DBG(common, "%s --> %d, %u/%u\n", __func__,
563 req->status, req->actual,
564 bh->bulk_out_intended_length);
565 if (req->status == -ECONNRESET) /* Request was cancelled */
566 usb_ep_fifo_flush(ep);
567
568 /* Hold the lock while we update the request and buffer states */
569 smp_wmb();
570 spin_lock(&common->lock);
571 bh->outreq_busy = 0;
572 bh->state = BUF_STATE_FULL;
573 wakeup_thread(common);
574 spin_unlock(&common->lock);
575}
576
577
578/*-------------------------------------------------------------------------*/
579
580/* Ep0 class-specific handlers. These always run in_irq. */
581
582static int fsg_setup(struct usb_function *f,
583 const struct usb_ctrlrequest *ctrl)
584{
585 struct fsg_dev *fsg = fsg_from_func(f);
586 struct usb_request *req = fsg->common->ep0req;
587 u16 w_index = le16_to_cpu(ctrl->wIndex);
588 u16 w_value = le16_to_cpu(ctrl->wValue);
589 u16 w_length = le16_to_cpu(ctrl->wLength);
590
591 if (!fsg->common->config)
592 return -EOPNOTSUPP;
593
594 switch (ctrl->bRequest) {
595
596 case USB_BULK_RESET_REQUEST:
597 if (ctrl->bRequestType !=
598 (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
599 break;
600 if (w_index != fsg->interface_number || w_value != 0)
601 return -EDOM;
602
603 /* Raise an exception to stop the current operation
604 * and reinitialize our state. */
605 DBG(fsg, "bulk reset request\n");
606 raise_exception(fsg->common, FSG_STATE_RESET);
607 return DELAYED_STATUS;
608
609 case USB_BULK_GET_MAX_LUN_REQUEST:
610 if (ctrl->bRequestType !=
611 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
612 break;
613 if (w_index != fsg->interface_number || w_value != 0)
614 return -EDOM;
615 VDBG(fsg, "get max LUN\n");
616 *(u8 *) req->buf = fsg->common->nluns - 1;
617 return 1;
618 }
619
620 VDBG(fsg,
621 "unknown class-specific control req "
622 "%02x.%02x v%04x i%04x l%u\n",
623 ctrl->bRequestType, ctrl->bRequest,
624 le16_to_cpu(ctrl->wValue), w_index, w_length);
625 return -EOPNOTSUPP;
626}
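As a worked example (illustrative values only): a Get Max LUN request arrives with bRequestType 0xA1 (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE), wValue 0 and wIndex equal to the MSF's interface number; with nluns = 1 the function answers a single byte of 0, i.e. the highest LUN number rather than the count.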
627
628
629/*-------------------------------------------------------------------------*/
630
631/* All the following routines run in process context */
632
633
634/* Use this for bulk or interrupt transfers, not ep0 */
635static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
636 struct usb_request *req, int *pbusy,
637 enum fsg_buffer_state *state)
638{
639 int rc;
640
641 if (ep == fsg->bulk_in)
642 dump_msg(fsg, "bulk-in", req->buf, req->length);
643
644 spin_lock_irq(&fsg->common->lock);
645 *pbusy = 1;
646 *state = BUF_STATE_BUSY;
647 spin_unlock_irq(&fsg->common->lock);
648 rc = usb_ep_queue(ep, req, GFP_KERNEL);
649 if (rc != 0) {
650 *pbusy = 0;
651 *state = BUF_STATE_EMPTY;
652
653 /* We can't do much more than wait for a reset */
654
655 /* Note: currently the net2280 driver fails zero-length
656 * submissions if DMA is enabled. */
657 if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
658 req->length == 0))
659 WARNING(fsg, "error in submission: %s --> %d\n",
660 ep->name, rc);
661 }
662}
663
664#define START_TRANSFER_OR(common, ep_name, req, pbusy, state) \
665 if (fsg_is_set(common)) \
666 start_transfer((common)->fsg, (common)->fsg->ep_name, \
667 req, pbusy, state); \
668 else
669
670#define START_TRANSFER(common, ep_name, req, pbusy, state) \
671 START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
672
673
674
675static int sleep_thread(struct fsg_common *common)
676{
677 int rc = 0;
678
679 /* Wait until a signal arrives or we are woken up */
680 for (;;) {
681 try_to_freeze();
682 set_current_state(TASK_INTERRUPTIBLE);
683 if (signal_pending(current)) {
684 rc = -EINTR;
685 break;
686 }
687 if (common->thread_wakeup_needed)
688 break;
689 schedule();
690 }
691 __set_current_state(TASK_RUNNING);
692 common->thread_wakeup_needed = 0;
693 return rc;
694}
695
696
697/*-------------------------------------------------------------------------*/
698
699static int do_read(struct fsg_common *common)
700{
701 struct fsg_lun *curlun = common->curlun;
702 u32 lba;
703 struct fsg_buffhd *bh;
704 int rc;
705 u32 amount_left;
706 loff_t file_offset, file_offset_tmp;
707 unsigned int amount;
708 unsigned int partial_page;
709 ssize_t nread;
710
711 /* Get the starting Logical Block Address and check that it's
712 * not too big */
713 if (common->cmnd[0] == SC_READ_6)
714 lba = get_unaligned_be24(&common->cmnd[1]);
715 else {
716 lba = get_unaligned_be32(&common->cmnd[2]);
717
718 /* We allow DPO (Disable Page Out = don't save data in the
719 * cache) and FUA (Force Unit Access = don't read from the
720 * cache), but we don't implement them. */
721 if ((common->cmnd[1] & ~0x18) != 0) {
722 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
723 return -EINVAL;
724 }
725 }
726 if (lba >= curlun->num_sectors) {
727 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
728 return -EINVAL;
729 }
730 file_offset = ((loff_t) lba) << 9;
731
732 /* Carry out the file reads */
733 amount_left = common->data_size_from_cmnd;
734 if (unlikely(amount_left == 0))
735 return -EIO; /* No default reply */
736
737 for (;;) {
738
739 /* Figure out how much we need to read:
740 * Try to read the remaining amount.
741 * But don't read more than the buffer size.
742 * And don't try to read past the end of the file.
743 * Finally, if we're not at a page boundary, don't read past
744 * the next page.
745 * If this means reading 0 then we were asked to read past
746 * the end of file. */
747 amount = min(amount_left, FSG_BUFLEN);
748 amount = min((loff_t) amount,
749 curlun->file_length - file_offset);
750 partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
751 if (partial_page > 0)
752 amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
753 partial_page);
754
755 /* Wait for the next buffer to become available */
756 bh = common->next_buffhd_to_fill;
757 while (bh->state != BUF_STATE_EMPTY) {
758 rc = sleep_thread(common);
759 if (rc)
760 return rc;
761 }
762
763 /* If we were asked to read past the end of file,
764 * end with an empty buffer. */
765 if (amount == 0) {
766 curlun->sense_data =
767 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
768 curlun->sense_data_info = file_offset >> 9;
769 curlun->info_valid = 1;
770 bh->inreq->length = 0;
771 bh->state = BUF_STATE_FULL;
772 break;
773 }
774
775 /* Perform the read */
776 file_offset_tmp = file_offset;
777 nread = vfs_read(curlun->filp,
778 (char __user *) bh->buf,
779 amount, &file_offset_tmp);
780 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
781 (unsigned long long) file_offset,
782 (int) nread);
783 if (signal_pending(current))
784 return -EINTR;
785
786 if (nread < 0) {
787 LDBG(curlun, "error in file read: %d\n",
788 (int) nread);
789 nread = 0;
790 } else if (nread < amount) {
791 LDBG(curlun, "partial file read: %d/%u\n",
792 (int) nread, amount);
793 nread -= (nread & 511); /* Round down to a block */
794 }
795 file_offset += nread;
796 amount_left -= nread;
797 common->residue -= nread;
798 bh->inreq->length = nread;
799 bh->state = BUF_STATE_FULL;
800
801 /* If an error occurred, report it and its position */
802 if (nread < amount) {
803 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
804 curlun->sense_data_info = file_offset >> 9;
805 curlun->info_valid = 1;
806 break;
807 }
808
809 if (amount_left == 0)
810 break; /* No more left to read */
811
812 /* Send this buffer and go read some more */
813 bh->inreq->zero = 0;
814 START_TRANSFER_OR(common, bulk_in, bh->inreq,
815 &bh->inreq_busy, &bh->state)
816 /* Don't know what to do if
817 * common->fsg is NULL */
818 return -EIO;
819 common->next_buffhd_to_fill = bh->next;
820 }
821
822 return -EIO; /* No default reply */
823}
824
825
826/*-------------------------------------------------------------------------*/
827
828static int do_write(struct fsg_common *common)
829{
830 struct fsg_lun *curlun = common->curlun;
831 u32 lba;
832 struct fsg_buffhd *bh;
833 int get_some_more;
834 u32 amount_left_to_req, amount_left_to_write;
835 loff_t usb_offset, file_offset, file_offset_tmp;
836 unsigned int amount;
837 unsigned int partial_page;
838 ssize_t nwritten;
839 int rc;
840
841 if (curlun->ro) {
842 curlun->sense_data = SS_WRITE_PROTECTED;
843 return -EINVAL;
844 }
845 spin_lock(&curlun->filp->f_lock);
846 curlun->filp->f_flags &= ~O_SYNC; /* Default is not to wait */
847 spin_unlock(&curlun->filp->f_lock);
848
849 /* Get the starting Logical Block Address and check that it's
850 * not too big */
851 if (common->cmnd[0] == SC_WRITE_6)
852 lba = get_unaligned_be24(&common->cmnd[1]);
853 else {
854 lba = get_unaligned_be32(&common->cmnd[2]);
855
856 /* We allow DPO (Disable Page Out = don't save data in the
857 * cache) and FUA (Force Unit Access = write directly to the
858 * medium). We don't implement DPO; we implement FUA by
859 * performing synchronous output. */
860 if (common->cmnd[1] & ~0x18) {
861 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
862 return -EINVAL;
863 }
864 if (common->cmnd[1] & 0x08) { /* FUA */
865 spin_lock(&curlun->filp->f_lock);
866 curlun->filp->f_flags |= O_SYNC;
867 spin_unlock(&curlun->filp->f_lock);
868 }
869 }
870 if (lba >= curlun->num_sectors) {
871 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
872 return -EINVAL;
873 }
874
875 /* Carry out the file writes */
876 get_some_more = 1;
877 file_offset = usb_offset = ((loff_t) lba) << 9;
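	/* Two cursors from here on: usb_offset tracks how far ahead
	 * we have requested data from the host, file_offset tracks
	 * what has actually reached the backing file, letting USB
	 * transfers and VFS writes overlap. */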
878 amount_left_to_req = common->data_size_from_cmnd;
879 amount_left_to_write = common->data_size_from_cmnd;
880
881 while (amount_left_to_write > 0) {
882
883 /* Queue a request for more data from the host */
884 bh = common->next_buffhd_to_fill;
885 if (bh->state == BUF_STATE_EMPTY && get_some_more) {
886
887 /* Figure out how much we want to get:
888 * Try to get the remaining amount.
889 * But don't get more than the buffer size.
890 * And don't try to go past the end of the file.
891 * If we're not at a page boundary,
892 * don't go past the next page.
893 * If this means getting 0, then we were asked
894 * to write past the end of file.
895 * Finally, round down to a block boundary. */
896 amount = min(amount_left_to_req, FSG_BUFLEN);
897 amount = min((loff_t) amount, curlun->file_length -
898 usb_offset);
899 partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
900 if (partial_page > 0)
901 amount = min(amount,
902 (unsigned int) PAGE_CACHE_SIZE - partial_page);
903
904 if (amount == 0) {
905 get_some_more = 0;
906 curlun->sense_data =
907 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
908 curlun->sense_data_info = usb_offset >> 9;
909 curlun->info_valid = 1;
910 continue;
911 }
912 amount -= (amount & 511);
913 if (amount == 0) {
914
915				/* Why were we asked to transfer a
916 * partial block? */
917 get_some_more = 0;
918 continue;
919 }
920
921 /* Get the next buffer */
922 usb_offset += amount;
923 common->usb_amount_left -= amount;
924 amount_left_to_req -= amount;
925 if (amount_left_to_req == 0)
926 get_some_more = 0;
927
928 /* amount is always divisible by 512, hence by
929 * the bulk-out maxpacket size */
930 bh->outreq->length = amount;
931 bh->bulk_out_intended_length = amount;
932 bh->outreq->short_not_ok = 1;
933 START_TRANSFER_OR(common, bulk_out, bh->outreq,
934 &bh->outreq_busy, &bh->state)
935 /* Don't know what to do if
936 * common->fsg is NULL */
937 return -EIO;
938 common->next_buffhd_to_fill = bh->next;
939 continue;
940 }
941
942 /* Write the received data to the backing file */
943 bh = common->next_buffhd_to_drain;
944 if (bh->state == BUF_STATE_EMPTY && !get_some_more)
945 break; /* We stopped early */
946 if (bh->state == BUF_STATE_FULL) {
947 smp_rmb();
948 common->next_buffhd_to_drain = bh->next;
949 bh->state = BUF_STATE_EMPTY;
950
951 /* Did something go wrong with the transfer? */
952 if (bh->outreq->status != 0) {
953 curlun->sense_data = SS_COMMUNICATION_FAILURE;
954 curlun->sense_data_info = file_offset >> 9;
955 curlun->info_valid = 1;
956 break;
957 }
958
959 amount = bh->outreq->actual;
960 if (curlun->file_length - file_offset < amount) {
961 LERROR(curlun,
962 "write %u @ %llu beyond end %llu\n",
963 amount, (unsigned long long) file_offset,
964 (unsigned long long) curlun->file_length);
965 amount = curlun->file_length - file_offset;
966 }
967
968 /* Perform the write */
969 file_offset_tmp = file_offset;
970 nwritten = vfs_write(curlun->filp,
971 (char __user *) bh->buf,
972 amount, &file_offset_tmp);
973 VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
974 (unsigned long long) file_offset,
975 (int) nwritten);
976 if (signal_pending(current))
977 return -EINTR; /* Interrupted! */
978
979 if (nwritten < 0) {
980 LDBG(curlun, "error in file write: %d\n",
981 (int) nwritten);
982 nwritten = 0;
983 } else if (nwritten < amount) {
984 LDBG(curlun, "partial file write: %d/%u\n",
985 (int) nwritten, amount);
986 nwritten -= (nwritten & 511);
987 /* Round down to a block */
988 }
989 file_offset += nwritten;
990 amount_left_to_write -= nwritten;
991 common->residue -= nwritten;
992
993 /* If an error occurred, report it and its position */
994 if (nwritten < amount) {
995 curlun->sense_data = SS_WRITE_ERROR;
996 curlun->sense_data_info = file_offset >> 9;
997 curlun->info_valid = 1;
998 break;
999 }
1000
1001 /* Did the host decide to stop early? */
1002 if (bh->outreq->actual != bh->outreq->length) {
1003 common->short_packet_received = 1;
1004 break;
1005 }
1006 continue;
1007 }
1008
1009 /* Wait for something to happen */
1010 rc = sleep_thread(common);
1011 if (rc)
1012 return rc;
1013 }
1014
1015 return -EIO; /* No default reply */
1016}
1017
1018
1019/*-------------------------------------------------------------------------*/
1020
1021static int do_synchronize_cache(struct fsg_common *common)
1022{
1023 struct fsg_lun *curlun = common->curlun;
1024 int rc;
1025
1026 /* We ignore the requested LBA and write out all file's
1027 * dirty data buffers. */
1028 rc = fsg_lun_fsync_sub(curlun);
1029 if (rc)
1030 curlun->sense_data = SS_WRITE_ERROR;
1031 return 0;
1032}
1033
1034
1035/*-------------------------------------------------------------------------*/
1036
1037static void invalidate_sub(struct fsg_lun *curlun)
1038{
1039 struct file *filp = curlun->filp;
1040 struct inode *inode = filp->f_path.dentry->d_inode;
1041 unsigned long rc;
1042
1043 rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1044 VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
1045}
1046
1047static int do_verify(struct fsg_common *common)
1048{
1049 struct fsg_lun *curlun = common->curlun;
1050 u32 lba;
1051 u32 verification_length;
1052 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1053 loff_t file_offset, file_offset_tmp;
1054 u32 amount_left;
1055 unsigned int amount;
1056 ssize_t nread;
1057
1058 /* Get the starting Logical Block Address and check that it's
1059 * not too big */
1060 lba = get_unaligned_be32(&common->cmnd[2]);
1061 if (lba >= curlun->num_sectors) {
1062 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1063 return -EINVAL;
1064 }
1065
1066 /* We allow DPO (Disable Page Out = don't save data in the
1067 * cache) but we don't implement it. */
1068 if (common->cmnd[1] & ~0x10) {
1069 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1070 return -EINVAL;
1071 }
1072
1073 verification_length = get_unaligned_be16(&common->cmnd[7]);
1074 if (unlikely(verification_length == 0))
1075 return -EIO; /* No default reply */
1076
1077 /* Prepare to carry out the file verify */
1078 amount_left = verification_length << 9;
1079 file_offset = ((loff_t) lba) << 9;
1080
1081 /* Write out all the dirty buffers before invalidating them */
1082 fsg_lun_fsync_sub(curlun);
1083 if (signal_pending(current))
1084 return -EINTR;
1085
1086 invalidate_sub(curlun);
1087 if (signal_pending(current))
1088 return -EINTR;
1089
1090 /* Just try to read the requested blocks */
1091 while (amount_left > 0) {
1092
1093 /* Figure out how much we need to read:
1094 * Try to read the remaining amount, but not more than
1095 * the buffer size.
1096 * And don't try to read past the end of the file.
1097 * If this means reading 0 then we were asked to read
1098 * past the end of file. */
1099 amount = min(amount_left, FSG_BUFLEN);
1100 amount = min((loff_t) amount,
1101 curlun->file_length - file_offset);
1102 if (amount == 0) {
1103 curlun->sense_data =
1104 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1105 curlun->sense_data_info = file_offset >> 9;
1106 curlun->info_valid = 1;
1107 break;
1108 }
1109
1110 /* Perform the read */
1111 file_offset_tmp = file_offset;
1112 nread = vfs_read(curlun->filp,
1113 (char __user *) bh->buf,
1114 amount, &file_offset_tmp);
1115 VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1116 (unsigned long long) file_offset,
1117 (int) nread);
1118 if (signal_pending(current))
1119 return -EINTR;
1120
1121 if (nread < 0) {
1122 LDBG(curlun, "error in file verify: %d\n",
1123 (int) nread);
1124 nread = 0;
1125 } else if (nread < amount) {
1126 LDBG(curlun, "partial file verify: %d/%u\n",
1127 (int) nread, amount);
1128 nread -= (nread & 511); /* Round down to a sector */
1129 }
1130 if (nread == 0) {
1131 curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1132 curlun->sense_data_info = file_offset >> 9;
1133 curlun->info_valid = 1;
1134 break;
1135 }
1136 file_offset += nread;
1137 amount_left -= nread;
1138 }
1139 return 0;
1140}
1141
1142
1143/*-------------------------------------------------------------------------*/
1144
1145static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1146{
1147 struct fsg_lun *curlun = common->curlun;
1148 u8 *buf = (u8 *) bh->buf;
1149
1150 if (!curlun) { /* Unsupported LUNs are okay */
1151 common->bad_lun_okay = 1;
1152 memset(buf, 0, 36);
1153 buf[0] = 0x7f; /* Unsupported, no device-type */
1154 buf[4] = 31; /* Additional length */
1155 return 36;
1156 }
1157
1158 buf[0] = curlun->cdrom ? TYPE_CDROM : TYPE_DISK;
1159 buf[1] = curlun->removable ? 0x80 : 0;
1160 buf[2] = 2; /* ANSI SCSI level 2 */
1161 buf[3] = 2; /* SCSI-2 INQUIRY data format */
1162 buf[4] = 31; /* Additional length */
1163 buf[5] = 0; /* No special options */
1164 buf[6] = 0;
1165 buf[7] = 0;
1166 memcpy(buf + 8, common->inquiry_string, sizeof common->inquiry_string);
1167 return 36;
1168}
1169
1170
1171static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1172{
1173 struct fsg_lun *curlun = common->curlun;
1174 u8 *buf = (u8 *) bh->buf;
1175 u32 sd, sdinfo;
1176 int valid;
1177
1178 /*
1179 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1180 *
1181 * If a REQUEST SENSE command is received from an initiator
1182 * with a pending unit attention condition (before the target
1183 * generates the contingent allegiance condition), then the
1184 * target shall either:
1185 * a) report any pending sense data and preserve the unit
1186 * attention condition on the logical unit, or,
1187 * b) report the unit attention condition, may discard any
1188 * pending sense data, and clear the unit attention
1189 * condition on the logical unit for that initiator.
1190 *
1191 * FSG normally uses option a); enable this code to use option b).
1192 */
1193#if 0
1194 if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1195 curlun->sense_data = curlun->unit_attention_data;
1196 curlun->unit_attention_data = SS_NO_SENSE;
1197 }
1198#endif
1199
1200 if (!curlun) { /* Unsupported LUNs are okay */
1201 common->bad_lun_okay = 1;
1202 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1203 sdinfo = 0;
1204 valid = 0;
1205 } else {
1206 sd = curlun->sense_data;
1207 sdinfo = curlun->sense_data_info;
1208 valid = curlun->info_valid << 7;
1209 curlun->sense_data = SS_NO_SENSE;
1210 curlun->sense_data_info = 0;
1211 curlun->info_valid = 0;
1212 }
1213
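	/* Fixed-format sense data, 18 bytes as built below
	 * (illustration):
	 *   byte   0: 0x70 (current error) | valid bit
	 *   byte   2: sense key (SK)
	 *   bytes 3-6: information field (big-endian LBA)
	 *   byte   7: additional length (18 - 8 == 10)
	 *   byte  12: additional sense code (ASC)
	 *   byte  13: additional sense code qualifier (ASCQ) */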
1214 memset(buf, 0, 18);
1215 buf[0] = valid | 0x70; /* Valid, current error */
1216 buf[2] = SK(sd);
1217 put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1218 buf[7] = 18 - 8; /* Additional sense length */
1219 buf[12] = ASC(sd);
1220 buf[13] = ASCQ(sd);
1221 return 18;
1222}
1223
1224
1225static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1226{
1227 struct fsg_lun *curlun = common->curlun;
1228 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1229 int pmi = common->cmnd[8];
1230 u8 *buf = (u8 *) bh->buf;
1231
1232 /* Check the PMI and LBA fields */
1233 if (pmi > 1 || (pmi == 0 && lba != 0)) {
1234 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1235 return -EINVAL;
1236 }
1237
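	/* Worked example: a 256 MiB backing file holds
	 * 268435456 / 512 == 524288 sectors, so bytes 0-3 carry
	 * 524287 (the highest valid LBA) and bytes 4-7 carry 512. */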
1238 put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1239 /* Max logical block */
1240 put_unaligned_be32(512, &buf[4]); /* Block length */
1241 return 8;
1242}
1243
1244
1245static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1246{
1247 struct fsg_lun *curlun = common->curlun;
1248 int msf = common->cmnd[1] & 0x02;
1249 u32 lba = get_unaligned_be32(&common->cmnd[2]);
1250 u8 *buf = (u8 *) bh->buf;
1251
1252 if (common->cmnd[1] & ~0x02) { /* Mask away MSF */
1253 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1254 return -EINVAL;
1255 }
1256 if (lba >= curlun->num_sectors) {
1257 curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1258 return -EINVAL;
1259 }
1260
1261 memset(buf, 0, 8);
1262 buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
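	/* store_cdrom_address() (shared storage_common code) emits
	 * either a plain big-endian LBA or, when msf is set, the
	 * CD-style minute/second/frame form: 75 frames per second
	 * plus the customary 2-second (150-frame) offset. */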
1263 store_cdrom_address(&buf[4], msf, lba);
1264 return 8;
1265}
1266
1267
1268static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1269{
1270 struct fsg_lun *curlun = common->curlun;
1271 int msf = common->cmnd[1] & 0x02;
1272 int start_track = common->cmnd[6];
1273 u8 *buf = (u8 *) bh->buf;
1274
1275 if ((common->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1276 start_track > 1) {
1277 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1278 return -EINVAL;
1279 }
1280
1281 memset(buf, 0, 20);
1282 buf[1] = (20-2); /* TOC data length */
1283 buf[2] = 1; /* First track number */
1284 buf[3] = 1; /* Last track number */
1285 buf[5] = 0x16; /* Data track, copying allowed */
1286 buf[6] = 0x01; /* Only track is number 1 */
1287 store_cdrom_address(&buf[8], msf, 0);
1288
1289 buf[13] = 0x16; /* Lead-out track is data */
1290 buf[14] = 0xAA; /* Lead-out track number */
1291 store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1292 return 20;
1293}
1294
1295
1296static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1297{
1298 struct fsg_lun *curlun = common->curlun;
1299 int mscmnd = common->cmnd[0];
1300 u8 *buf = (u8 *) bh->buf;
1301 u8 *buf0 = buf;
1302 int pc, page_code;
1303 int changeable_values, all_pages;
1304 int valid_page = 0;
1305 int len, limit;
1306
1307 if ((common->cmnd[1] & ~0x08) != 0) { /* Mask away DBD */
1308 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1309 return -EINVAL;
1310 }
1311 pc = common->cmnd[2] >> 6;
1312 page_code = common->cmnd[2] & 0x3f;
1313 if (pc == 3) {
1314 curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1315 return -EINVAL;
1316 }
1317 changeable_values = (pc == 1);
1318 all_pages = (page_code == 0x3f);
1319
1320 /* Write the mode parameter header. Fixed values are: default
1321 * medium type, no cache control (DPOFUA), and no block descriptors.
1322 * The only variable value is the WriteProtect bit. We will fill in
1323 * the mode data length later. */
1324 memset(buf, 0, 8);
1325 if (mscmnd == SC_MODE_SENSE_6) {
1326 buf[2] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1327 buf += 4;
1328 limit = 255;
1329 } else { /* SC_MODE_SENSE_10 */
1330 buf[3] = (curlun->ro ? 0x80 : 0x00); /* WP, DPOFUA */
1331 buf += 8;
1332 limit = 65535; /* Should really be FSG_BUFLEN */
1333 }
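	/* Illustration: MODE SENSE(6) uses a 4-byte parameter header
	 * (length, medium type, device-specific byte, block-desc
	 * length) while MODE SENSE(10) uses an 8-byte one with a
	 * 16-bit length field -- hence buf advances by 4 or 8. */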
1334
1335 /* No block descriptors */
1336
1337 /* The mode pages, in numerical order. The only page we support
1338 * is the Caching page. */
1339 if (page_code == 0x08 || all_pages) {
1340 valid_page = 1;
1341 buf[0] = 0x08; /* Page code */
1342 buf[1] = 10; /* Page length */
1343 memset(buf+2, 0, 10); /* None of the fields are changeable */
1344
1345 if (!changeable_values) {
1346 buf[2] = 0x04; /* Write cache enable, */
1347 /* Read cache not disabled */
1348 /* No cache retention priorities */
1349 put_unaligned_be16(0xffff, &buf[4]);
1350 /* Don't disable prefetch */
1351 /* Minimum prefetch = 0 */
1352 put_unaligned_be16(0xffff, &buf[8]);
1353 /* Maximum prefetch */
1354 put_unaligned_be16(0xffff, &buf[10]);
1355 /* Maximum prefetch ceiling */
1356 }
1357 buf += 12;
1358 }
1359
1360 /* Check that a valid page was requested and the mode data length
1361 * isn't too long. */
1362 len = buf - buf0;
1363 if (!valid_page || len > limit) {
1364 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1365 return -EINVAL;
1366 }
1367
1368 /* Store the mode data length */
1369 if (mscmnd == SC_MODE_SENSE_6)
1370 buf0[0] = len - 1;
1371 else
1372 put_unaligned_be16(len - 2, buf0);
1373 return len;
1374}
1375
1376
1377static int do_start_stop(struct fsg_common *common)
1378{
1379 if (!common->curlun) {
1380 return -EINVAL;
1381 } else if (!common->curlun->removable) {
1382 common->curlun->sense_data = SS_INVALID_COMMAND;
1383 return -EINVAL;
1384 }
1385 return 0;
1386}
1387
1388
1389static int do_prevent_allow(struct fsg_common *common)
1390{
1391 struct fsg_lun *curlun = common->curlun;
1392 int prevent;
1393
1394 if (!common->curlun) {
1395 return -EINVAL;
1396 } else if (!common->curlun->removable) {
1397 common->curlun->sense_data = SS_INVALID_COMMAND;
1398 return -EINVAL;
1399 }
1400
1401 prevent = common->cmnd[4] & 0x01;
1402 if ((common->cmnd[4] & ~0x01) != 0) { /* Mask away Prevent */
1403 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1404 return -EINVAL;
1405 }
1406
1407 if (curlun->prevent_medium_removal && !prevent)
1408 fsg_lun_fsync_sub(curlun);
1409 curlun->prevent_medium_removal = prevent;
1410 return 0;
1411}
1412
1413
1414static int do_read_format_capacities(struct fsg_common *common,
1415 struct fsg_buffhd *bh)
1416{
1417 struct fsg_lun *curlun = common->curlun;
1418 u8 *buf = (u8 *) bh->buf;
1419
1420 buf[0] = buf[1] = buf[2] = 0;
1421 buf[3] = 8; /* Only the Current/Maximum Capacity Descriptor */
1422 buf += 4;
1423
1424 put_unaligned_be32(curlun->num_sectors, &buf[0]);
1425 /* Number of blocks */
1426 put_unaligned_be32(512, &buf[4]); /* Block length */
1427 buf[4] = 0x02; /* Current capacity */
1428 return 12;
1429}
1430
1431
1432static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1433{
1434 struct fsg_lun *curlun = common->curlun;
1435
1436 /* We don't support MODE SELECT */
1437 if (curlun)
1438 curlun->sense_data = SS_INVALID_COMMAND;
1439 return -EINVAL;
1440}
1441
1442
1443/*-------------------------------------------------------------------------*/
1444
1445static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1446{
1447 int rc;
1448
1449 rc = fsg_set_halt(fsg, fsg->bulk_in);
1450 if (rc == -EAGAIN)
1451 VDBG(fsg, "delayed bulk-in endpoint halt\n");
1452 while (rc != 0) {
1453 if (rc != -EAGAIN) {
1454 WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1455 rc = 0;
1456 break;
1457 }
1458
1459 /* Wait for a short time and then try again */
1460 if (msleep_interruptible(100) != 0)
1461 return -EINTR;
1462 rc = usb_ep_set_halt(fsg->bulk_in);
1463 }
1464 return rc;
1465}
1466
1467static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1468{
1469 int rc;
1470
1471 DBG(fsg, "bulk-in set wedge\n");
1472 rc = usb_ep_set_wedge(fsg->bulk_in);
1473 if (rc == -EAGAIN)
1474 VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1475 while (rc != 0) {
1476 if (rc != -EAGAIN) {
1477 WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1478 rc = 0;
1479 break;
1480 }
1481
1482 /* Wait for a short time and then try again */
1483 if (msleep_interruptible(100) != 0)
1484 return -EINTR;
1485 rc = usb_ep_set_wedge(fsg->bulk_in);
1486 }
1487 return rc;
1488}
1489
1490static int pad_with_zeros(struct fsg_dev *fsg)
1491{
1492 struct fsg_buffhd *bh = fsg->common->next_buffhd_to_fill;
1493 u32 nkeep = bh->inreq->length;
1494 u32 nsend;
1495 int rc;
1496
1497 bh->state = BUF_STATE_EMPTY; /* For the first iteration */
1498 fsg->common->usb_amount_left = nkeep + fsg->common->residue;
1499 while (fsg->common->usb_amount_left > 0) {
1500
1501 /* Wait for the next buffer to be free */
1502 while (bh->state != BUF_STATE_EMPTY) {
1503 rc = sleep_thread(fsg->common);
1504 if (rc)
1505 return rc;
1506 }
1507
1508 nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
1509 memset(bh->buf + nkeep, 0, nsend - nkeep);
1510 bh->inreq->length = nsend;
1511 bh->inreq->zero = 0;
1512 start_transfer(fsg, fsg->bulk_in, bh->inreq,
1513 &bh->inreq_busy, &bh->state);
1514 bh = fsg->common->next_buffhd_to_fill = bh->next;
1515 fsg->common->usb_amount_left -= nsend;
1516 nkeep = 0;
1517 }
1518 return 0;
1519}
1520
1521static int throw_away_data(struct fsg_common *common)
1522{
1523 struct fsg_buffhd *bh;
1524 u32 amount;
1525 int rc;
1526
1527 for (bh = common->next_buffhd_to_drain;
1528 bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
1529 bh = common->next_buffhd_to_drain) {
1530
1531 /* Throw away the data in a filled buffer */
1532 if (bh->state == BUF_STATE_FULL) {
1533 smp_rmb();
1534 bh->state = BUF_STATE_EMPTY;
1535 common->next_buffhd_to_drain = bh->next;
1536
1537 /* A short packet or an error ends everything */
1538 if (bh->outreq->actual != bh->outreq->length ||
1539 bh->outreq->status != 0) {
1540 raise_exception(common,
1541 FSG_STATE_ABORT_BULK_OUT);
1542 return -EINTR;
1543 }
1544 continue;
1545 }
1546
1547 /* Try to submit another request if we need one */
1548 bh = common->next_buffhd_to_fill;
1549 if (bh->state == BUF_STATE_EMPTY
1550 && common->usb_amount_left > 0) {
1551 amount = min(common->usb_amount_left, FSG_BUFLEN);
1552
1553 /* amount is always divisible by 512, hence by
1554 * the bulk-out maxpacket size */
1555 bh->outreq->length = amount;
1556 bh->bulk_out_intended_length = amount;
1557 bh->outreq->short_not_ok = 1;
1558 START_TRANSFER_OR(common, bulk_out, bh->outreq,
1559 &bh->outreq_busy, &bh->state)
1560 /* Don't know what to do if
1561 * common->fsg is NULL */
1562 return -EIO;
1563 common->next_buffhd_to_fill = bh->next;
1564 common->usb_amount_left -= amount;
1565 continue;
1566 }
1567
1568 /* Otherwise wait for something to happen */
1569 rc = sleep_thread(common);
1570 if (rc)
1571 return rc;
1572 }
1573 return 0;
1574}
1575
1576
1577static int finish_reply(struct fsg_common *common)
1578{
1579 struct fsg_buffhd *bh = common->next_buffhd_to_fill;
1580 int rc = 0;
1581
1582 switch (common->data_dir) {
1583 case DATA_DIR_NONE:
1584 break; /* Nothing to send */
1585
1586 /* If we don't know whether the host wants to read or write,
1587 * this must be CB or CBI with an unknown command. We mustn't
1588 * try to send or receive any data. So stall both bulk pipes
1589 * if we can and wait for a reset. */
1590 case DATA_DIR_UNKNOWN:
1591 if (!common->can_stall) {
1592 /* Nothing */
1593 } else if (fsg_is_set(common)) {
1594 fsg_set_halt(common->fsg, common->fsg->bulk_out);
1595 rc = halt_bulk_in_endpoint(common->fsg);
1596 } else {
1597 /* Don't know what to do if common->fsg is NULL */
1598 rc = -EIO;
1599 }
1600 break;
1601
1602 /* All but the last buffer of data must have already been sent */
1603 case DATA_DIR_TO_HOST:
1604 if (common->data_size == 0) {
1605 /* Nothing to send */
1606
1607 /* If there's no residue, simply send the last buffer */
1608 } else if (common->residue == 0) {
1609 bh->inreq->zero = 0;
1610 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1611 &bh->inreq_busy, &bh->state)
1612 return -EIO;
1613 common->next_buffhd_to_fill = bh->next;
1614
1615 /* For Bulk-only, if we're allowed to stall then send the
1616 * short packet and halt the bulk-in endpoint. If we can't
1617 * stall, pad out the remaining data with 0's. */
1618 } else if (common->can_stall) {
1619 bh->inreq->zero = 1;
1620 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1621 &bh->inreq_busy, &bh->state)
1622 /* Don't know what to do if
1623 * common->fsg is NULL */
1624 rc = -EIO;
1625 common->next_buffhd_to_fill = bh->next;
1626 if (common->fsg)
1627 rc = halt_bulk_in_endpoint(common->fsg);
1628 } else if (fsg_is_set(common)) {
1629 rc = pad_with_zeros(common->fsg);
1630 } else {
1631 /* Don't know what to do if common->fsg is NULL */
1632 rc = -EIO;
1633 }
1634 break;
1635
1636 /* We have processed all we want from the data the host has sent.
1637 * There may still be outstanding bulk-out requests. */
1638 case DATA_DIR_FROM_HOST:
1639 if (common->residue == 0) {
1640 /* Nothing to receive */
1641
1642 /* Did the host stop sending unexpectedly early? */
1643 } else if (common->short_packet_received) {
1644 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1645 rc = -EINTR;
1646
1647 /* We haven't processed all the incoming data. Even though
1648 * we may be allowed to stall, doing so would cause a race.
1649 * The controller may already have ACK'ed all the remaining
1650 * bulk-out packets, in which case the host wouldn't see a
1651 * STALL. Not realizing the endpoint was halted, it wouldn't
1652 * clear the halt -- leading to problems later on. */
1653#if 0
1654 } else if (common->can_stall) {
1655 if (fsg_is_set(common))
1656 fsg_set_halt(common->fsg,
1657 common->fsg->bulk_out);
1658 raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1659 rc = -EINTR;
1660#endif
1661
1662 /* We can't stall. Read in the excess data and throw it
1663 * all away. */
1664 } else {
1665 rc = throw_away_data(common);
1666 }
1667 break;
1668 }
1669 return rc;
1670}
1671
1672
1673static int send_status(struct fsg_common *common)
1674{
1675 struct fsg_lun *curlun = common->curlun;
1676 struct fsg_buffhd *bh;
1677 struct bulk_cs_wrap *csw;
1678 int rc;
1679 u8 status = USB_STATUS_PASS;
1680 u32 sd, sdinfo = 0;
1681
1682 /* Wait for the next buffer to become available */
1683 bh = common->next_buffhd_to_fill;
1684 while (bh->state != BUF_STATE_EMPTY) {
1685 rc = sleep_thread(common);
1686 if (rc)
1687 return rc;
1688 }
1689
1690 if (curlun) {
1691 sd = curlun->sense_data;
1692 sdinfo = curlun->sense_data_info;
1693 } else if (common->bad_lun_okay)
1694 sd = SS_NO_SENSE;
1695 else
1696 sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1697
1698 if (common->phase_error) {
1699 DBG(common, "sending phase-error status\n");
1700 status = USB_STATUS_PHASE_ERROR;
1701 sd = SS_INVALID_COMMAND;
1702 } else if (sd != SS_NO_SENSE) {
1703 DBG(common, "sending command-failure status\n");
1704 status = USB_STATUS_FAIL;
1705 VDBG(common, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1706 " info x%x\n",
1707 SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1708 }
1709
1710 /* Store and send the Bulk-only CSW */
1711 csw = (void *)bh->buf;
1712
1713 csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1714 csw->Tag = common->tag;
1715 csw->Residue = cpu_to_le32(common->residue);
1716 csw->Status = status;
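	/* Bulk-only CSW, 13 bytes on the wire (illustration):
	 *   bytes  0-3: dCSWSignature 0x53425355 ("USBS")
	 *   bytes  4-7: dCSWTag, echoing the CBW's tag
	 *   bytes 8-11: dCSWDataResidue
	 *   byte    12: bCSWStatus (0 pass, 1 fail, 2 phase error) */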
1717
1718 bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1719 bh->inreq->zero = 0;
1720 START_TRANSFER_OR(common, bulk_in, bh->inreq,
1721 &bh->inreq_busy, &bh->state)
1722 /* Don't know what to do if common->fsg is NULL */
1723 return -EIO;
1724
1725 common->next_buffhd_to_fill = bh->next;
1726 return 0;
1727}
1728
1729
1730/*-------------------------------------------------------------------------*/
1731
1732/* Check whether the command is properly formed and whether its data size
1733 * and direction agree with the values we already have. */
1734static int check_command(struct fsg_common *common, int cmnd_size,
1735 enum data_direction data_dir, unsigned int mask,
1736 int needs_medium, const char *name)
1737{
1738 int i;
1739 int lun = common->cmnd[1] >> 5;
1740 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
1741 char hdlen[20];
1742 struct fsg_lun *curlun;
1743
1744 hdlen[0] = 0;
1745 if (common->data_dir != DATA_DIR_UNKNOWN)
1746 sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1747 common->data_size);
1748 VDBG(common, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
1749 name, cmnd_size, dirletter[(int) data_dir],
1750 common->data_size_from_cmnd, common->cmnd_size, hdlen);
1751
1752 /* We can't reply at all until we know the correct data direction
1753 * and size. */
1754 if (common->data_size_from_cmnd == 0)
1755 data_dir = DATA_DIR_NONE;
1756 if (common->data_size < common->data_size_from_cmnd) {
1757 /* Host data size < Device data size is a phase error.
1758 * Carry out the command, but only transfer as much as
1759 * we are allowed. */
1760 common->data_size_from_cmnd = common->data_size;
1761 common->phase_error = 1;
1762 }
1763 common->residue = common->data_size;
1764 common->usb_amount_left = common->data_size;
1765
1766 /* Conflicting data directions is a phase error */
1767 if (common->data_dir != data_dir
1768 && common->data_size_from_cmnd > 0) {
1769 common->phase_error = 1;
1770 return -EINVAL;
1771 }
1772
1773 /* Verify the length of the command itself */
1774 if (cmnd_size != common->cmnd_size) {
1775
1776 /* Special case workaround: There are plenty of buggy SCSI
1777 * implementations. Many have issues with cbw->Length
1778 * field passing a wrong command size. For those cases we
1779 * always try to work around the problem by using the length
1780 * sent by the host side provided it is at least as large
1781 * as the correct command length.
1782 * Examples of such cases would be MS-Windows, which issues
1783 * REQUEST SENSE with cbw->Length == 12 where it should
1784	 * be 6, and the Xbox 360 issuing INQUIRY, TEST UNIT READY and
1785 * REQUEST SENSE with cbw->Length == 10 where it should
1786 * be 6 as well.
1787 */
1788 if (cmnd_size <= common->cmnd_size) {
1789 DBG(common, "%s is buggy! Expected length %d "
1790 "but we got %d\n", name,
1791 cmnd_size, common->cmnd_size);
1792 cmnd_size = common->cmnd_size;
1793 } else {
1794 common->phase_error = 1;
1795 return -EINVAL;
1796 }
1797 }
1798
1799 /* Check that the LUN values are consistent */
1800 if (common->lun != lun)
1801 DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1802 common->lun, lun);
1803
1804 /* Check the LUN */
1805 if (common->lun >= 0 && common->lun < common->nluns) {
1806 curlun = &common->luns[common->lun];
1807 common->curlun = curlun;
1808 if (common->cmnd[0] != SC_REQUEST_SENSE) {
1809 curlun->sense_data = SS_NO_SENSE;
1810 curlun->sense_data_info = 0;
1811 curlun->info_valid = 0;
1812 }
1813 } else {
1814 common->curlun = NULL;
1815 curlun = NULL;
1816 common->bad_lun_okay = 0;
1817
1818 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
1819 * to use unsupported LUNs; all others may not. */
1820 if (common->cmnd[0] != SC_INQUIRY &&
1821 common->cmnd[0] != SC_REQUEST_SENSE) {
1822 DBG(common, "unsupported LUN %d\n", common->lun);
1823 return -EINVAL;
1824 }
1825 }
1826
1827 /* If a unit attention condition exists, only INQUIRY and
1828 * REQUEST SENSE commands are allowed; anything else must fail. */
1829 if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1830 common->cmnd[0] != SC_INQUIRY &&
1831 common->cmnd[0] != SC_REQUEST_SENSE) {
1832 curlun->sense_data = curlun->unit_attention_data;
1833 curlun->unit_attention_data = SS_NO_SENSE;
1834 return -EINVAL;
1835 }
1836
1837 /* Check that only command bytes listed in the mask are non-zero */
1838 common->cmnd[1] &= 0x1f; /* Mask away the LUN */
1839 for (i = 1; i < cmnd_size; ++i) {
1840 if (common->cmnd[i] && !(mask & (1 << i))) {
1841 if (curlun)
1842 curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1843 return -EINVAL;
1844 }
1845 }
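	/* Worked example: READ(10) passes mask
	 * (1<<1) | (0xf<<2) | (3<<7), so bytes 1 (flags), 2-5 (LBA)
	 * and 7-8 (transfer length) may be non-zero; a non-zero
	 * byte 6 or 9 fails with INVALID FIELD IN CDB. */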
1846
1847 /* If the medium isn't mounted and the command needs to access
1848 * it, return an error. */
1849 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
1850 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1851 return -EINVAL;
1852 }
1853
1854 return 0;
1855}
1856
1857
1858static int do_scsi_command(struct fsg_common *common)
1859{
1860 struct fsg_buffhd *bh;
1861 int rc;
1862 int reply = -EINVAL;
1863 int i;
1864 static char unknown[16];
1865
1866 dump_cdb(common);
1867
1868 /* Wait for the next buffer to become available for data or status */
1869 bh = common->next_buffhd_to_fill;
1870 common->next_buffhd_to_drain = bh;
1871 while (bh->state != BUF_STATE_EMPTY) {
1872 rc = sleep_thread(common);
1873 if (rc)
1874 return rc;
1875 }
1876 common->phase_error = 0;
1877 common->short_packet_received = 0;
1878
1879 down_read(&common->filesem); /* We're using the backing file */
1880 switch (common->cmnd[0]) {
1881
1882 case SC_INQUIRY:
1883 common->data_size_from_cmnd = common->cmnd[4];
1884 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1885 (1<<4), 0,
1886 "INQUIRY");
1887 if (reply == 0)
1888 reply = do_inquiry(common, bh);
1889 break;
1890
1891 case SC_MODE_SELECT_6:
1892 common->data_size_from_cmnd = common->cmnd[4];
1893 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1894 (1<<1) | (1<<4), 0,
1895 "MODE SELECT(6)");
1896 if (reply == 0)
1897 reply = do_mode_select(common, bh);
1898 break;
1899
1900 case SC_MODE_SELECT_10:
1901 common->data_size_from_cmnd =
1902 get_unaligned_be16(&common->cmnd[7]);
1903 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1904 (1<<1) | (3<<7), 0,
1905 "MODE SELECT(10)");
1906 if (reply == 0)
1907 reply = do_mode_select(common, bh);
1908 break;
1909
1910 case SC_MODE_SENSE_6:
1911 common->data_size_from_cmnd = common->cmnd[4];
1912 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1913 (1<<1) | (1<<2) | (1<<4), 0,
1914 "MODE SENSE(6)");
1915 if (reply == 0)
1916 reply = do_mode_sense(common, bh);
1917 break;
1918
1919 case SC_MODE_SENSE_10:
1920 common->data_size_from_cmnd =
1921 get_unaligned_be16(&common->cmnd[7]);
1922 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1923 (1<<1) | (1<<2) | (3<<7), 0,
1924 "MODE SENSE(10)");
1925 if (reply == 0)
1926 reply = do_mode_sense(common, bh);
1927 break;
1928
1929 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1930 common->data_size_from_cmnd = 0;
1931 reply = check_command(common, 6, DATA_DIR_NONE,
1932 (1<<4), 0,
1933 "PREVENT-ALLOW MEDIUM REMOVAL");
1934 if (reply == 0)
1935 reply = do_prevent_allow(common);
1936 break;
1937
1938 case SC_READ_6:
1939 i = common->cmnd[4];
1940 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1941 reply = check_command(common, 6, DATA_DIR_TO_HOST,
1942 (7<<1) | (1<<4), 1,
1943 "READ(6)");
1944 if (reply == 0)
1945 reply = do_read(common);
1946 break;
1947
1948 case SC_READ_10:
1949 common->data_size_from_cmnd =
1950 get_unaligned_be16(&common->cmnd[7]) << 9;
1951 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1952 (1<<1) | (0xf<<2) | (3<<7), 1,
1953 "READ(10)");
1954 if (reply == 0)
1955 reply = do_read(common);
1956 break;
1957
1958 case SC_READ_12:
1959 common->data_size_from_cmnd =
1960 get_unaligned_be32(&common->cmnd[6]) << 9;
1961 reply = check_command(common, 12, DATA_DIR_TO_HOST,
1962 (1<<1) | (0xf<<2) | (0xf<<6), 1,
1963 "READ(12)");
1964 if (reply == 0)
1965 reply = do_read(common);
1966 break;
1967
1968 case SC_READ_CAPACITY:
1969 common->data_size_from_cmnd = 8;
1970 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1971 (0xf<<2) | (1<<8), 1,
1972 "READ CAPACITY");
1973 if (reply == 0)
1974 reply = do_read_capacity(common, bh);
1975 break;
1976
1977 case SC_READ_HEADER:
1978 if (!common->curlun || !common->curlun->cdrom)
1979 goto unknown_cmnd;
1980 common->data_size_from_cmnd =
1981 get_unaligned_be16(&common->cmnd[7]);
1982 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1983 (3<<7) | (0x1f<<1), 1,
1984 "READ HEADER");
1985 if (reply == 0)
1986 reply = do_read_header(common, bh);
1987 break;
1988
1989 case SC_READ_TOC:
1990 if (!common->curlun || !common->curlun->cdrom)
1991 goto unknown_cmnd;
1992 common->data_size_from_cmnd =
1993 get_unaligned_be16(&common->cmnd[7]);
1994 reply = check_command(common, 10, DATA_DIR_TO_HOST,
1995 (7<<6) | (1<<1), 1,
1996 "READ TOC");
1997 if (reply == 0)
1998 reply = do_read_toc(common, bh);
1999 break;
2000
2001 case SC_READ_FORMAT_CAPACITIES:
2002 common->data_size_from_cmnd =
2003 get_unaligned_be16(&common->cmnd[7]);
2004 reply = check_command(common, 10, DATA_DIR_TO_HOST,
2005 (3<<7), 1,
2006 "READ FORMAT CAPACITIES");
2007 if (reply == 0)
2008 reply = do_read_format_capacities(common, bh);
2009 break;
2010
2011 case SC_REQUEST_SENSE:
2012 common->data_size_from_cmnd = common->cmnd[4];
2013 reply = check_command(common, 6, DATA_DIR_TO_HOST,
2014 (1<<4), 0,
2015 "REQUEST SENSE");
2016 if (reply == 0)
2017 reply = do_request_sense(common, bh);
2018 break;
2019
2020 case SC_START_STOP_UNIT:
2021 common->data_size_from_cmnd = 0;
2022 reply = check_command(common, 6, DATA_DIR_NONE,
2023 (1<<1) | (1<<4), 0,
2024 "START-STOP UNIT");
2025 if (reply == 0)
2026 reply = do_start_stop(common);
2027 break;
2028
2029 case SC_SYNCHRONIZE_CACHE:
2030 common->data_size_from_cmnd = 0;
2031 reply = check_command(common, 10, DATA_DIR_NONE,
2032 (0xf<<2) | (3<<7), 1,
2033 "SYNCHRONIZE CACHE");
2034 if (reply == 0)
2035 reply = do_synchronize_cache(common);
2036 break;
2037
2038 case SC_TEST_UNIT_READY:
2039 common->data_size_from_cmnd = 0;
2040 reply = check_command(common, 6, DATA_DIR_NONE,
2041 0, 1,
2042 "TEST UNIT READY");
2043 break;
2044
2045 /* Although optional, this command is used by MS-Windows. We
2046 * support a minimal version: BytChk must be 0. */
2047 case SC_VERIFY:
2048 common->data_size_from_cmnd = 0;
2049 reply = check_command(common, 10, DATA_DIR_NONE,
2050 (1<<1) | (0xf<<2) | (3<<7), 1,
2051 "VERIFY");
2052 if (reply == 0)
2053 reply = do_verify(common);
2054 break;
2055
2056 case SC_WRITE_6:
2057 i = common->cmnd[4];
2058 common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2059 reply = check_command(common, 6, DATA_DIR_FROM_HOST,
2060 (7<<1) | (1<<4), 1,
2061 "WRITE(6)");
2062 if (reply == 0)
2063 reply = do_write(common);
2064 break;
2065
2066 case SC_WRITE_10:
2067 common->data_size_from_cmnd =
2068 get_unaligned_be16(&common->cmnd[7]) << 9;
2069 reply = check_command(common, 10, DATA_DIR_FROM_HOST,
2070 (1<<1) | (0xf<<2) | (3<<7), 1,
2071 "WRITE(10)");
2072 if (reply == 0)
2073 reply = do_write(common);
2074 break;
2075
2076 case SC_WRITE_12:
2077 common->data_size_from_cmnd =
2078 get_unaligned_be32(&common->cmnd[6]) << 9;
2079 reply = check_command(common, 12, DATA_DIR_FROM_HOST,
2080 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2081 "WRITE(12)");
2082 if (reply == 0)
2083 reply = do_write(common);
2084 break;
2085
2086 /* Some mandatory commands that we recognize but don't implement.
2087 * They don't mean much in this setting. It's left as an exercise
2088 * for anyone interested to implement RESERVE and RELEASE in terms
2089	 * of POSIX locks. */
2090 case SC_FORMAT_UNIT:
2091 case SC_RELEASE:
2092 case SC_RESERVE:
2093 case SC_SEND_DIAGNOSTIC:
2094 /* Fall through */
2095
2096 default:
2097unknown_cmnd:
2098 common->data_size_from_cmnd = 0;
2099 sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2100 reply = check_command(common, common->cmnd_size,
2101 DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2102 if (reply == 0) {
2103 common->curlun->sense_data = SS_INVALID_COMMAND;
2104 reply = -EINVAL;
2105 }
2106 break;
2107 }
2108 up_read(&common->filesem);
2109
2110 if (reply == -EINTR || signal_pending(current))
2111 return -EINTR;
2112
2113 /* Set up the single reply buffer for finish_reply() */
2114 if (reply == -EINVAL)
2115 reply = 0; /* Error reply length */
2116 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2117 reply = min((u32) reply, common->data_size_from_cmnd);
2118 bh->inreq->length = reply;
2119 bh->state = BUF_STATE_FULL;
2120 common->residue -= reply;
2121 } /* Otherwise it's already set */
2122
2123 return 0;
2124}
2125
2126
2127/*-------------------------------------------------------------------------*/
2128
2129static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2130{
2131 struct usb_request *req = bh->outreq;
2132 struct fsg_bulk_cb_wrap *cbw = req->buf;
2133 struct fsg_common *common = fsg->common;
2134
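	/* Bulk-only CBW, 31 bytes on the wire (illustration):
	 *   bytes  0-3: dCBWSignature 0x43425355 ("USBC")
	 *   bytes  4-7: dCBWTag (echoed back in the CSW)
	 *   bytes 8-11: dCBWDataTransferLength
	 *   byte    12: bmCBWFlags (bit 7 set = device-to-host)
	 *   byte    13: bCBWLUN, byte 14: bCBWCBLength (1-16)
	 *   bytes 15-30: the CDB itself */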
2135 /* Was this a real packet? Should it be ignored? */
2136 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2137 return -EINVAL;
2138
2139 /* Is the CBW valid? */
2140	if (req->actual != USB_BULK_CB_WRAP_LEN ||
2141	    cbw->Signature != cpu_to_le32(USB_BULK_CB_SIG)) {
2143 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2144 req->actual,
2145 le32_to_cpu(cbw->Signature));
2146
2147 /* The Bulk-only spec says we MUST stall the IN endpoint
2148 * (6.6.1), so it's unavoidable. It also says we must
2149 * retain this state until the next reset, but there's
2150 * no way to tell the controller driver it should ignore
2151 * Clear-Feature(HALT) requests.
2152 *
2153 * We aren't required to halt the OUT endpoint; instead
2154 * we can simply accept and discard any data received
2155 * until the next reset. */
2156 wedge_bulk_in_endpoint(fsg);
2157 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2158 return -EINVAL;
2159 }
2160
2161 /* Is the CBW meaningful? */
2162 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2163 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2164 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2165 "cmdlen %u\n",
2166 cbw->Lun, cbw->Flags, cbw->Length);
2167
2168 /* We can do anything we want here, so let's stall the
2169 * bulk pipes if we are allowed to. */
2170 if (common->can_stall) {
2171 fsg_set_halt(fsg, fsg->bulk_out);
2172 halt_bulk_in_endpoint(fsg);
2173 }
2174 return -EINVAL;
2175 }
2176
2177 /* Save the command for later */
2178 common->cmnd_size = cbw->Length;
2179 memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2180 if (cbw->Flags & USB_BULK_IN_FLAG)
2181 common->data_dir = DATA_DIR_TO_HOST;
2182 else
2183 common->data_dir = DATA_DIR_FROM_HOST;
2184 common->data_size = le32_to_cpu(cbw->DataTransferLength);
2185 if (common->data_size == 0)
2186 common->data_dir = DATA_DIR_NONE;
2187 common->lun = cbw->Lun;
2188 common->tag = cbw->Tag;
2189 return 0;
2190}
2191
2192
2193static int get_next_command(struct fsg_common *common)
2194{
2195 struct fsg_buffhd *bh;
2196 int rc = 0;
2197
2198 /* Wait for the next buffer to become available */
2199 bh = common->next_buffhd_to_fill;
2200 while (bh->state != BUF_STATE_EMPTY) {
2201 rc = sleep_thread(common);
2202 if (rc)
2203 return rc;
2204 }
2205
2206 /* Queue a request to read a Bulk-only CBW */
2207 set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
2208 bh->outreq->short_not_ok = 1;
2209 START_TRANSFER_OR(common, bulk_out, bh->outreq,
2210 &bh->outreq_busy, &bh->state)
2211 /* Don't know what to do if common->fsg is NULL */
2212 return -EIO;
2213
2214 /* We will drain the buffer in software, which means we
2215 * can reuse it for the next filling. No need to advance
2216 * next_buffhd_to_fill. */
2217
2218 /* Wait for the CBW to arrive */
2219 while (bh->state != BUF_STATE_FULL) {
2220 rc = sleep_thread(common);
2221 if (rc)
2222 return rc;
2223 }
2224 smp_rmb();
2225 rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2226 bh->state = BUF_STATE_EMPTY;
2227
2228 return rc;
2229}
2230
2231
2232/*-------------------------------------------------------------------------*/
2233
2234static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
2235 const struct usb_endpoint_descriptor *d)
2236{
2237 int rc;
2238
2239 ep->driver_data = common;
2240 rc = usb_ep_enable(ep, d);
2241 if (rc)
2242 ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
2243 return rc;
2244}
2245
2246static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2247 struct usb_request **preq)
2248{
2249 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2250 if (*preq)
2251 return 0;
2252 ERROR(common, "can't allocate request for %s\n", ep->name);
2253 return -ENOMEM;
2254}
2255
2256/*
2257 * Reset interface setting and re-init endpoint state (toggle etc).
2258 * Call with altsetting < 0 to disable the interface. The only other
2259 * available altsetting is 0, which enables the interface.
2260 */
2261static int do_set_interface(struct fsg_common *common, int altsetting)
2262{
2263 int rc = 0;
2264 int i;
2265 const struct usb_endpoint_descriptor *d;
2266
2267 if (common->running)
2268 DBG(common, "reset interface\n");
2269
2270reset:
2271 /* Deallocate the requests */
2272 if (common->prev_fsg) {
2273 struct fsg_dev *fsg = common->prev_fsg;
2274
2275 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2276 struct fsg_buffhd *bh = &common->buffhds[i];
2277
2278 if (bh->inreq) {
2279 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2280 bh->inreq = NULL;
2281 }
2282 if (bh->outreq) {
2283 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2284 bh->outreq = NULL;
2285 }
2286 }
2287
2288 /* Disable the endpoints */
2289 if (fsg->bulk_in_enabled) {
2290 usb_ep_disable(fsg->bulk_in);
2291 fsg->bulk_in_enabled = 0;
2292 }
2293 if (fsg->bulk_out_enabled) {
2294 usb_ep_disable(fsg->bulk_out);
2295 fsg->bulk_out_enabled = 0;
2296 }
2297
2298		common->prev_fsg = NULL;
2299 }
2300
2301 common->running = 0;
2302 if (altsetting < 0 || rc != 0)
2303 return rc;
2304
2305 DBG(common, "set interface %d\n", altsetting);
2306
2307 if (fsg_is_set(common)) {
2308 struct fsg_dev *fsg = common->fsg;
2309 common->prev_fsg = common->fsg;
2310
2311 /* Enable the endpoints */
2312 d = fsg_ep_desc(common->gadget,
2313 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2314 rc = enable_endpoint(common, fsg->bulk_in, d);
2315 if (rc)
2316 goto reset;
2317 fsg->bulk_in_enabled = 1;
2318
2319 d = fsg_ep_desc(common->gadget,
2320 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2321 rc = enable_endpoint(common, fsg->bulk_out, d);
2322 if (rc)
2323 goto reset;
2324 fsg->bulk_out_enabled = 1;
2325 common->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2326 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2327
2328 /* Allocate the requests */
2329 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2330 struct fsg_buffhd *bh = &common->buffhds[i];
2331
2332 rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2333 if (rc)
2334 goto reset;
2335 rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2336 if (rc)
2337 goto reset;
2338 bh->inreq->buf = bh->outreq->buf = bh->buf;
2339 bh->inreq->context = bh->outreq->context = bh;
2340 bh->inreq->complete = bulk_in_complete;
2341 bh->outreq->complete = bulk_out_complete;
2342 }
2343
2344 common->running = 1;
2345 for (i = 0; i < common->nluns; ++i)
2346 common->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2347 return rc;
2348 } else {
2349 return -EIO;
2350 }
2351}
2352
2353
2354/*
2355 * Change our operational configuration. This code must agree with the code
2356 * that returns config descriptors, and with interface altsetting code.
2357 *
2358 * It's also responsible for power management interactions. Some
2359 * configurations might not work with our current power sources.
2360 * For now we just assume the gadget is always self-powered.
2361 */
2362static int do_set_config(struct fsg_common *common, u8 new_config)
2363{
2364 int rc = 0;
2365
2366 /* Disable the single interface */
2367 if (common->config != 0) {
2368 DBG(common, "reset config\n");
2369 common->config = 0;
2370 rc = do_set_interface(common, -1);
2371 }
2372
2373 /* Enable the interface */
2374 if (new_config != 0) {
2375 common->config = new_config;
2376 rc = do_set_interface(common, 0);
2377 if (rc != 0)
2378 common->config = 0; /* Reset on errors */
2379 }
2380 return rc;
2381}
2382
2383
2384/****************************** ALT CONFIGS ******************************/
2385
2386
2387static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2388{
2389 struct fsg_dev *fsg = fsg_from_func(f);
2390 fsg->common->prev_fsg = fsg->common->fsg;
2391 fsg->common->fsg = fsg;
2392 fsg->common->new_config = 1;
2393 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2394 return 0;
2395}
2396
2397static void fsg_disable(struct usb_function *f)
2398{
2399 struct fsg_dev *fsg = fsg_from_func(f);
2400 fsg->common->prev_fsg = fsg->common->fsg;
2401 fsg->common->fsg = fsg;
2402 fsg->common->new_config = 0;
2403 raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2404}
2405
2406
2407/*-------------------------------------------------------------------------*/
2408
2409static void handle_exception(struct fsg_common *common)
2410{
2411 siginfo_t info;
2412 int sig;
2413 int i;
2414 struct fsg_buffhd *bh;
2415 enum fsg_state old_state;
2416 u8 new_config;
2417 struct fsg_lun *curlun;
2418 unsigned int exception_req_tag;
2419 int rc;
2420
2421 /* Clear the existing signals. Anything but SIGUSR1 is converted
2422 * into a high-priority EXIT exception. */
2423 for (;;) {
2424 sig = dequeue_signal_lock(current, &current->blocked, &info);
2425 if (!sig)
2426 break;
2427 if (sig != SIGUSR1) {
2428 if (common->state < FSG_STATE_EXIT)
2429 DBG(common, "Main thread exiting on signal\n");
2430 raise_exception(common, FSG_STATE_EXIT);
2431 }
2432 }
2433
2434 /* Cancel all the pending transfers */
2435 if (fsg_is_set(common)) {
2436 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2437 bh = &common->buffhds[i];
2438 if (bh->inreq_busy)
2439 usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2440 if (bh->outreq_busy)
2441 usb_ep_dequeue(common->fsg->bulk_out,
2442 bh->outreq);
2443 }
2444
2445 /* Wait until everything is idle */
2446 for (;;) {
2447 int num_active = 0;
2448 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2449 bh = &common->buffhds[i];
2450 num_active += bh->inreq_busy + bh->outreq_busy;
2451 }
2452 if (num_active == 0)
2453 break;
2454 if (sleep_thread(common))
2455 return;
2456 }
2457
2458 /* Clear out the controller's fifos */
2459 if (common->fsg->bulk_in_enabled)
2460 usb_ep_fifo_flush(common->fsg->bulk_in);
2461 if (common->fsg->bulk_out_enabled)
2462 usb_ep_fifo_flush(common->fsg->bulk_out);
2463 }
2464
2465 /* Reset the I/O buffer states and pointers, the SCSI
2466 * state, and the exception. Then invoke the handler. */
2467 spin_lock_irq(&common->lock);
2468
2469 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2470 bh = &common->buffhds[i];
2471 bh->state = BUF_STATE_EMPTY;
2472 }
2473 common->next_buffhd_to_fill = &common->buffhds[0];
2474 common->next_buffhd_to_drain = &common->buffhds[0];
2475 exception_req_tag = common->exception_req_tag;
2476 new_config = common->new_config;
2477 old_state = common->state;
2478
2479 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2480 common->state = FSG_STATE_STATUS_PHASE;
2481 else {
2482 for (i = 0; i < common->nluns; ++i) {
2483 curlun = &common->luns[i];
2484 curlun->prevent_medium_removal = 0;
2485 curlun->sense_data = SS_NO_SENSE;
2486 curlun->unit_attention_data = SS_NO_SENSE;
2487 curlun->sense_data_info = 0;
2488 curlun->info_valid = 0;
2489 }
2490 common->state = FSG_STATE_IDLE;
2491 }
2492 spin_unlock_irq(&common->lock);
2493
2494 /* Carry out any extra actions required for the exception */
2495 switch (old_state) {
2496 case FSG_STATE_ABORT_BULK_OUT:
2497 send_status(common);
2498 spin_lock_irq(&common->lock);
2499 if (common->state == FSG_STATE_STATUS_PHASE)
2500 common->state = FSG_STATE_IDLE;
2501 spin_unlock_irq(&common->lock);
2502 break;
2503
2504 case FSG_STATE_RESET:
2505 /* In case we were forced against our will to halt a
2506 * bulk endpoint, clear the halt now. (The SuperH UDC
2507 * requires this.) */
2508 if (!fsg_is_set(common))
2509 break;
2510 if (test_and_clear_bit(IGNORE_BULK_OUT,
2511 &common->fsg->atomic_bitflags))
2512 usb_ep_clear_halt(common->fsg->bulk_in);
2513
2514 if (common->ep0_req_tag == exception_req_tag)
2515 ep0_queue(common); /* Complete the status stage */
2516
2517 /* Technically this should go here, but it would only be
2518 * a waste of time. Ditto for the INTERFACE_CHANGE and
2519 * CONFIG_CHANGE cases. */
2520 /* for (i = 0; i < common->nluns; ++i) */
2521 /* common->luns[i].unit_attention_data = */
2522 /* SS_RESET_OCCURRED; */
2523 break;
2524
2525 case FSG_STATE_CONFIG_CHANGE:
2526 rc = do_set_config(common, new_config);
2527 if (common->ep0_req_tag != exception_req_tag)
2528 break;
2529 if (rc != 0) { /* STALL on errors */
2530 DBG(common, "ep0 set halt\n");
2531 usb_ep_set_halt(common->ep0);
2532 } else { /* Complete the status stage */
2533 ep0_queue(common);
2534 }
2535 break;
2536
2537 case FSG_STATE_EXIT:
2538 case FSG_STATE_TERMINATED:
2539 do_set_config(common, 0); /* Free resources */
2540 spin_lock_irq(&common->lock);
2541 common->state = FSG_STATE_TERMINATED; /* Stop the thread */
2542 spin_unlock_irq(&common->lock);
2543 break;
2544
2545 case FSG_STATE_INTERFACE_CHANGE:
2546 case FSG_STATE_DISCONNECT:
2547 case FSG_STATE_COMMAND_PHASE:
2548 case FSG_STATE_DATA_PHASE:
2549 case FSG_STATE_STATUS_PHASE:
2550 case FSG_STATE_IDLE:
2551 break;
2552 }
2553}
2554
2555
2556/*-------------------------------------------------------------------------*/
2557
2558static int fsg_main_thread(void *common_)
2559{
2560 struct fsg_common *common = common_;
2561
2562 /* Allow the thread to be killed by a signal, but set the signal mask
2563 * to block everything but INT, TERM, KILL, and USR1. */
2564 allow_signal(SIGINT);
2565 allow_signal(SIGTERM);
2566 allow_signal(SIGKILL);
2567 allow_signal(SIGUSR1);
2568
2569 /* Allow the thread to be frozen */
2570 set_freezable();
2571
2572 /* Arrange for userspace references to be interpreted as kernel
2573 * pointers. That way we can pass a kernel pointer to a routine
2574 * that expects a __user pointer and it will work okay. */
2575 set_fs(get_ds());
2576
2577 /* The main loop */
2578 while (common->state != FSG_STATE_TERMINATED) {
2579 if (exception_in_progress(common) || signal_pending(current)) {
2580 handle_exception(common);
2581 continue;
2582 }
2583
2584 if (!common->running) {
2585 sleep_thread(common);
2586 continue;
2587 }
2588
2589 if (get_next_command(common))
2590 continue;
2591
2592 spin_lock_irq(&common->lock);
2593 if (!exception_in_progress(common))
2594 common->state = FSG_STATE_DATA_PHASE;
2595 spin_unlock_irq(&common->lock);
2596
2597 if (do_scsi_command(common) || finish_reply(common))
2598 continue;
2599
2600 spin_lock_irq(&common->lock);
2601 if (!exception_in_progress(common))
2602 common->state = FSG_STATE_STATUS_PHASE;
2603 spin_unlock_irq(&common->lock);
2604
2605 if (send_status(common))
2606 continue;
2607
2608 spin_lock_irq(&common->lock);
2609 if (!exception_in_progress(common))
2610 common->state = FSG_STATE_IDLE;
2611 spin_unlock_irq(&common->lock);
2612 }
2613
2614 spin_lock_irq(&common->lock);
2615 common->thread_task = NULL;
2616 spin_unlock_irq(&common->lock);
2617
2618 if (common->thread_exits)
2619 common->thread_exits(common);
2620
2621 /* Let the unbind and cleanup routines know the thread has exited */
2622 complete_and_exit(&common->thread_notifier, 0);
2623}
2624
2625
2626/*************************** DEVICE ATTRIBUTES ***************************/
2627
2628/* Write permission is checked per LUN in store_*() functions. */
2629static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
2630static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
2631
2632
2633/****************************** FSG COMMON ******************************/
2634
2635static void fsg_common_release(struct kref *ref);
2636
2637static void fsg_lun_release(struct device *dev)
2638{
2639 /* Nothing needs to be done */
2640}
2641
2642static inline void fsg_common_get(struct fsg_common *common)
2643{
2644 kref_get(&common->ref);
2645}
2646
2647static inline void fsg_common_put(struct fsg_common *common)
2648{
2649 kref_put(&common->ref, fsg_common_release);
2650}
2651
2652
2653static struct fsg_common *fsg_common_init(struct fsg_common *common,
2654 struct usb_composite_dev *cdev,
2655 struct fsg_config *cfg)
2656{
2657 struct usb_gadget *gadget = cdev->gadget;
2658 struct fsg_buffhd *bh;
2659 struct fsg_lun *curlun;
2660 struct fsg_lun_config *lcfg;
2661 int nluns, i, rc;
2662 char *pathbuf;
2663
2664 /* Find out how many LUNs there should be */
2665 nluns = cfg->nluns;
2666 if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2667 dev_err(&gadget->dev, "invalid number of LUNs: %u\n", nluns);
2668 return ERR_PTR(-EINVAL);
2669 }
2670
2671 /* Allocate? */
2672 if (!common) {
2673 common = kzalloc(sizeof *common, GFP_KERNEL);
2674 if (!common)
2675 return ERR_PTR(-ENOMEM);
2676 common->free_storage_on_release = 1;
2677 } else {
2678		memset(common, 0, sizeof *common);
2679 common->free_storage_on_release = 0;
2680 }
2681
2682 common->private_data = cfg->private_data;
2683
2684 common->gadget = gadget;
2685 common->ep0 = gadget->ep0;
2686 common->ep0req = cdev->req;
2687
2688 /* Maybe allocate device-global string IDs, and patch descriptors */
2689 if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2690 rc = usb_string_id(cdev);
2691 if (rc < 0) {
2692 kfree(common);
2693 return ERR_PTR(rc);
2694 }
2695 fsg_strings[FSG_STRING_INTERFACE].id = rc;
2696 fsg_intf_desc.iInterface = rc;
2697 }
2698
2699 /* Create the LUNs, open their backing files, and register the
2700 * LUN devices in sysfs. */
2701 curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
2702 if (!curlun) {
2703 kfree(common);
2704 return ERR_PTR(-ENOMEM);
2705 }
2706 common->luns = curlun;
2707
2708 init_rwsem(&common->filesem);
2709
2710 for (i = 0, lcfg = cfg->luns; i < nluns; ++i, ++curlun, ++lcfg) {
2711 curlun->cdrom = !!lcfg->cdrom;
2712 curlun->ro = lcfg->cdrom || lcfg->ro;
2713 curlun->removable = lcfg->removable;
2714 curlun->dev.release = fsg_lun_release;
2715 curlun->dev.parent = &gadget->dev;
2716 /* curlun->dev.driver = &fsg_driver.driver; XXX */
2717 dev_set_drvdata(&curlun->dev, &common->filesem);
2718 dev_set_name(&curlun->dev,
2719 cfg->lun_name_format
2720 ? cfg->lun_name_format
2721 : "lun%d",
2722 i);
2723
2724 rc = device_register(&curlun->dev);
2725 if (rc) {
2726 INFO(common, "failed to register LUN%d: %d\n", i, rc);
2727 common->nluns = i;
2728 goto error_release;
2729 }
2730
2731 rc = device_create_file(&curlun->dev, &dev_attr_ro);
2732 if (rc)
2733 goto error_luns;
2734 rc = device_create_file(&curlun->dev, &dev_attr_file);
2735 if (rc)
2736 goto error_luns;
2737
2738 if (lcfg->filename) {
2739 rc = fsg_lun_open(curlun, lcfg->filename);
2740 if (rc)
2741 goto error_luns;
2742 } else if (!curlun->removable) {
2743 ERROR(common, "no file given for LUN%d\n", i);
2744 rc = -EINVAL;
2745 goto error_luns;
2746 }
2747 }
2748 common->nluns = nluns;
2749
2750
2751 /* Data buffers cyclic list */
2752 /* Buffers in buffhds are static -- no need for additional
2753 * allocation. */
2754 bh = common->buffhds;
2755 i = FSG_NUM_BUFFERS - 1;
2756 do {
2757 bh->next = bh + 1;
2758 } while (++bh, --i);
2759 bh->next = common->buffhds;
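/*
 * The do-while above links buffhds[0..FSG_NUM_BUFFERS-2] forward and
 * the final assignment closes the ring; with FSG_NUM_BUFFERS == 2:
 *
 *	buffhds[0].next = &buffhds[1];
 *	buffhds[1].next = &buffhds[0];
 *
 * The I/O path then simply advances "fill" and "drain" pointers
 * around the ring instead of indexing the array.
 */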
2760
2761
2762 /* Prepare inquiryString */
2763 if (cfg->release != 0xffff) {
2764 i = cfg->release;
2765 } else {
2766 /* The sa1100 controller is not supported */
2767 i = gadget_is_sa1100(gadget)
2768 ? -1
2769 : usb_gadget_controller_number(gadget);
2770 if (i >= 0) {
2771 i = 0x0300 + i;
2772 } else {
2773 WARNING(common, "controller '%s' not recognized\n",
2774 gadget->name);
2775 i = 0x0399;
2776 }
2777 }
2778#define OR(x, y) ((x) ? (x) : (y))
2779 snprintf(common->inquiry_string, sizeof common->inquiry_string,
2780 "%-8s%-16s%04x",
2781 OR(cfg->vendor_name, "Linux "),
2782 /* Assume the product name depends on the first LUN */
2783 OR(cfg->product_name, common->luns->cdrom
2784 ? "File-CD Gadget  "
2785 : "File-Stor Gadget"),
2786 i);
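/*
 * Illustrative result with the defaults and controller number 8:
 * "%-8s%-16s%04x" produces "Linux   File-Stor Gadget0308" -- an
 * 8-byte vendor id, a 16-byte product id and four hex digits of
 * bcdDevice, matching the vendor, product and revision fields of a
 * SCSI INQUIRY response.
 */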
2787
2788
2789 /* Some peripheral controllers are known not to be able to
2790 * halt bulk endpoints correctly. If one of them is present,
2791 * disable stalls.
2792 */
2793 common->can_stall = cfg->can_stall &&
2794 !(gadget_is_sh(common->gadget) ||
2795 gadget_is_at91(common->gadget));
2796
2797
2798 spin_lock_init(&common->lock);
2799 kref_init(&common->ref);
2800
2801
2802 /* Tell the thread to start working */
2803 common->thread_exits = cfg->thread_exits;
2804 common->thread_task =
2805 kthread_create(fsg_main_thread, common,
2806 OR(cfg->thread_name, "file-storage"));
2807 if (IS_ERR(common->thread_task)) {
2808 rc = PTR_ERR(common->thread_task);
2809 goto error_release;
2810 }
2811 init_completion(&common->thread_notifier);
2812#undef OR
2813
2814
2815 /* Information */
2816 INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2817 INFO(common, "Number of LUNs=%d\n", common->nluns);
2818
2819 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
2820 for (i = 0, nluns = common->nluns, curlun = common->luns;
2821 i < nluns;
2822 ++curlun, ++i) {
2823 char *p = "(no medium)";
2824 if (fsg_lun_is_open(curlun)) {
2825 p = "(error)";
2826 if (pathbuf) {
2827 p = d_path(&curlun->filp->f_path,
2828 pathbuf, PATH_MAX);
2829 if (IS_ERR(p))
2830 p = "(error)";
2831 }
2832 }
2833 LINFO(curlun, "LUN: %s%s%sfile: %s\n",
2834 curlun->removable ? "removable " : "",
2835 curlun->ro ? "read only " : "",
2836 curlun->cdrom ? "CD-ROM " : "",
2837 p);
2838 }
2839 kfree(pathbuf);
2840
2841 DBG(common, "I/O thread pid: %d\n", task_pid_nr(common->thread_task));
2842
2843 wake_up_process(common->thread_task);
2844
2845 return common;
2846
2847
2848error_luns:
2849 common->nluns = i + 1;
2850error_release:
2851 common->state = FSG_STATE_TERMINATED; /* The thread is dead */
2852 /* Call fsg_common_release() directly; the ref might not be
2853 * initialised yet. */
2854 fsg_common_release(&common->ref);
2855 complete(&common->thread_notifier);
2856 return ERR_PTR(rc);
2857}
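/*
 * A minimal caller sketch (field values illustrative): filling
 * struct fsg_config by hand instead of going through
 * fsg_config_from_params() below.
 *
 *	static struct fsg_config cfg = {
 *		.nluns = 1,
 *		.luns[0] = {
 *			.filename  = "/root/backing.img",
 *			.removable = 1,
 *		},
 *		.release   = 0xffff,	// derive from controller type
 *		.can_stall = 1,
 *	};
 *
 *	common = fsg_common_init(NULL, cdev, &cfg);
 *	if (IS_ERR(common))
 *		return PTR_ERR(common);
 */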
2858
2859
2860static void fsg_common_release(struct kref *ref)
2861{
2862 struct fsg_common *common =
2863 container_of(ref, struct fsg_common, ref);
2864 unsigned i = common->nluns;
2865 struct fsg_lun *lun = common->luns;
2866
2867 /* If the thread isn't already dead, tell it to exit now */
2868 if (common->state != FSG_STATE_TERMINATED) {
2869 raise_exception(common, FSG_STATE_EXIT);
2870 wait_for_completion(&common->thread_notifier);
2871
2872 /* The cleanup routine waits for this completion also */
2873 complete(&common->thread_notifier);
2874 }
2875
2876 /* Beware the tempting for -> do-while optimization: during
2877 * error recovery nluns may be zero. */
2878
2879 for (; i; --i, ++lun) {
2880 device_remove_file(&lun->dev, &dev_attr_ro);
2881 device_remove_file(&lun->dev, &dev_attr_file);
2882 fsg_lun_close(lun);
2883 device_unregister(&lun->dev);
2884 }
2885
2886 kfree(common->luns);
2887 if (common->free_storage_on_release)
2888 kfree(common);
2889}
2890
2891
2892/*-------------------------------------------------------------------------*/
2893
2894
2895static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2896{
2897 struct fsg_dev *fsg = fsg_from_func(f);
2898
2899 DBG(fsg, "unbind\n");
2900 fsg_common_put(fsg->common);
2901 kfree(fsg);
2902}
2903
2904
2905static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2906{
2907 struct fsg_dev *fsg = fsg_from_func(f);
2908 struct usb_gadget *gadget = c->cdev->gadget;
2909 int rc;
2910 int i;
2911 struct usb_ep *ep;
2912
2913 fsg->gadget = gadget;
2914
2915 /* New interface */
2916 i = usb_interface_id(c, f);
2917 if (i < 0)
2918 return i;
2919 fsg_intf_desc.bInterfaceNumber = i;
2920 fsg->interface_number = i;
2921
2922 /* Find all the endpoints we will use */
2923 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2924 if (!ep)
2925 goto autoconf_fail;
2926 ep->driver_data = fsg->common; /* claim the endpoint */
2927 fsg->bulk_in = ep;
2928
2929 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2930 if (!ep)
2931 goto autoconf_fail;
2932 ep->driver_data = fsg->common; /* claim the endpoint */
2933 fsg->bulk_out = ep;
2934
2935 if (gadget_is_dualspeed(gadget)) {
2936 /* Assume endpoint addresses are the same for both speeds */
2937 fsg_hs_bulk_in_desc.bEndpointAddress =
2938 fsg_fs_bulk_in_desc.bEndpointAddress;
2939 fsg_hs_bulk_out_desc.bEndpointAddress =
2940 fsg_fs_bulk_out_desc.bEndpointAddress;
2941 f->hs_descriptors = fsg_hs_function;
2942 }
2943
2944 return 0;
2945
2946autoconf_fail:
2947 ERROR(fsg, "unable to autoconfigure all endpoints\n");
2948 rc = -ENOTSUPP;
2949 fsg_unbind(c, f);
2950 return rc;
2951}
2952
2953
2954/****************************** ADD FUNCTION ******************************/
2955
2956static struct usb_gadget_strings *fsg_strings_array[] = {
2957 &fsg_stringtab,
2958 NULL,
2959};
2960
2961static int fsg_add(struct usb_composite_dev *cdev,
2962 struct usb_configuration *c,
2963 struct fsg_common *common)
2964{
2965 struct fsg_dev *fsg;
2966 int rc;
2967
2968 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
2969 if (unlikely(!fsg))
2970 return -ENOMEM;
2971
2972 fsg->function.name = FSG_DRIVER_DESC;
2973 fsg->function.strings = fsg_strings_array;
2974 fsg->function.descriptors = fsg_fs_function;
2975 fsg->function.bind = fsg_bind;
2976 fsg->function.unbind = fsg_unbind;
2977 fsg->function.setup = fsg_setup;
2978 fsg->function.set_alt = fsg_set_alt;
2979 fsg->function.disable = fsg_disable;
2980
2981 fsg->common = common;
2982 /* Our caller holds a reference to the common structure, so we
2983 * don't have to worry about it being freed until we return
2984 * from this function. So instead of incrementing the counter
2985 * now and decrementing it in error recovery, we increment it
2986 * only once the call to usb_add_function() has succeeded. */
2987
2988 rc = usb_add_function(c, &fsg->function);
2989
2990 if (likely(rc == 0))
2991 fsg_common_get(fsg->common);
2992 else
2993 kfree(fsg);
2994
2995 return rc;
2996}
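/*
 * Typical use from a configuration's bind callback (names
 * hypothetical). fsg_add() takes its own reference on success, so
 * the caller may drop its initial one straight away:
 *
 *	static int msg_do_config(struct usb_configuration *c)
 *	{
 *		int rc = fsg_add(c->cdev, c, the_common);
 *		fsg_common_put(the_common);
 *		return rc;
 *	}
 */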
2997
2998
2999
3000/************************* Module parameters *************************/
3001
3002
3003struct fsg_module_parameters {
3004 char *file[FSG_MAX_LUNS];
3005 int ro[FSG_MAX_LUNS];
3006 int removable[FSG_MAX_LUNS];
3007 int cdrom[FSG_MAX_LUNS];
3008
3009 unsigned int file_count, ro_count, removable_count, cdrom_count;
3010 unsigned int luns; /* nluns */
3011 int stall; /* can_stall */
3012};
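/*
 * The *_count fields are filled in by module_param_array_named()
 * with the number of values actually given on the command line;
 * fsg_config_from_params() below consults file_count and
 * removable_count to pick defaults for LUNs the user did not
 * describe explicitly.
 */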
3013
3014
3015#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
3016 module_param_array_named(prefix ## name, params.name, type, \
3017 &prefix ## params.name ## _count, \
3018 S_IRUGO); \
3019 MODULE_PARM_DESC(prefix ## name, desc)
3020
3021#define _FSG_MODULE_PARAM(prefix, params, name, type, desc) \
3022 module_param_named(prefix ## name, params.name, type, \
3023 S_IRUGO); \
3024 MODULE_PARM_DESC(prefix ## name, desc)
3025
3026#define FSG_MODULE_PARAMETERS(prefix, params) \
3027 _FSG_MODULE_PARAM_ARRAY(prefix, params, file, charp, \
3028 "names of backing files or devices"); \
3029 _FSG_MODULE_PARAM_ARRAY(prefix, params, ro, bool, \
3030 "true to force read-only"); \
3031 _FSG_MODULE_PARAM_ARRAY(prefix, params, removable, bool, \
3032 "true to simulate removable media"); \
3033 _FSG_MODULE_PARAM_ARRAY(prefix, params, cdrom, bool, \
3034 "true to simulate CD-ROM instead of disk"); \
3035 _FSG_MODULE_PARAM(prefix, params, luns, uint, \
3036 "number of LUNs"); \
3037 _FSG_MODULE_PARAM(prefix, params, stall, bool, \
3038 "false to prevent bulk stalls")
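/*
 * Usage sketch (hypothetical driver, not part of this file):
 * instantiating with an empty prefix registers plain "file", "ro",
 * "removable", "cdrom", "luns" and "stall" module parameters backed
 * by the given structure.
 *
 *	static struct fsg_module_parameters mod_data = {
 *		.stall = 1,	// allow bulk stalls by default
 *	};
 *	FSG_MODULE_PARAMETERS(, mod_data);	// empty prefix
 */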
3039
3040
3041static void
3042fsg_config_from_params(struct fsg_config *cfg,
3043 const struct fsg_module_parameters *params)
3044{
3045 struct fsg_lun_config *lun;
3046 unsigned i;
3047
3048 /* Configure LUNs */
3049 cfg->nluns =
3050 min(params->luns ?: (params->file_count ?: 1u),
3051 (unsigned)FSG_MAX_LUNS);
3052 for (i = 0, lun = cfg->luns; i < cfg->nluns; ++i, ++lun) {
3053 lun->ro = !!params->ro[i];
3054 lun->cdrom = !!params->cdrom[i];
3055 lun->removable = /* Removable by default */
3056 params->removable_count <= i || params->removable[i];
3057 lun->filename =
3058 params->file_count > i && params->file[i][0]
3059 ? params->file[i]
3060 : NULL;
3061 }
3062
3063 /* Let MSF use defaults */
3064 cfg->lun_name_format = NULL;
3065 cfg->thread_name = NULL;
3066 cfg->vendor_name = NULL;
3067 cfg->product_name = NULL;
3068 cfg->release = 0xffff;
3069
3070 cfg->thread_exits = NULL;
3071 cfg->private_data = NULL;
3072
3073 /* Finalise */
3074 cfg->can_stall = params->stall;
3075}
3076
3077static inline struct fsg_common *
3078fsg_common_from_params(struct fsg_common *common,
3079 struct usb_composite_dev *cdev,
3080 const struct fsg_module_parameters *params)
3081 __attribute__((unused));
3082static inline struct fsg_common *
3083fsg_common_from_params(struct fsg_common *common,
3084 struct usb_composite_dev *cdev,
3085 const struct fsg_module_parameters *params)
3086{
3087 struct fsg_config cfg;
3088 fsg_config_from_params(&cfg, params);
3089 return fsg_common_init(common, cdev, &cfg);
3090}
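/*
 * Putting the pieces together (hypothetical composite gadget):
 *
 *	common = fsg_common_from_params(NULL, cdev, &mod_data);
 *	if (IS_ERR(common))
 *		return PTR_ERR(common);
 *
 * The __attribute__((unused)) on the declaration keeps gcc quiet in
 * gadgets that include this file but build their fsg_config by some
 * other means.
 */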
3091
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index c9966cc07d3a..95dae4c1ea40 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -4,6 +4,8 @@
4 * Copyright (C) 2003-2005,2008 David Brownell 4 * Copyright (C) 2003-2005,2008 David Brownell
5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger 5 * Copyright (C) 2003-2004 Robert Schwebel, Benedikt Spranger
6 * Copyright (C) 2008 Nokia Corporation 6 * Copyright (C) 2008 Nokia Corporation
7 * Copyright (C) 2009 Samsung Electronics
8 * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
7 * 9 *
8 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 11 * it under the terms of the GNU General Public License as published by
@@ -149,8 +151,8 @@ static struct usb_cdc_call_mgmt_descriptor call_mgmt_descriptor __initdata = {
149 .bDataInterface = 0x01, 151 .bDataInterface = 0x01,
150}; 152};
151 153
152static struct usb_cdc_acm_descriptor acm_descriptor __initdata = { 154static struct usb_cdc_acm_descriptor rndis_acm_descriptor __initdata = {
153 .bLength = sizeof acm_descriptor, 155 .bLength = sizeof rndis_acm_descriptor,
154 .bDescriptorType = USB_DT_CS_INTERFACE, 156 .bDescriptorType = USB_DT_CS_INTERFACE,
155 .bDescriptorSubType = USB_CDC_ACM_TYPE, 157 .bDescriptorSubType = USB_CDC_ACM_TYPE,
156 158
@@ -179,6 +181,20 @@ static struct usb_interface_descriptor rndis_data_intf __initdata = {
179 /* .iInterface = DYNAMIC */ 181 /* .iInterface = DYNAMIC */
180}; 182};
181 183
184
185static struct usb_interface_assoc_descriptor
186rndis_iad_descriptor = {
187 .bLength = sizeof rndis_iad_descriptor,
188 .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
189
190 .bFirstInterface = 0, /* XXX, hardcoded */
191 .bInterfaceCount = 2, // control + data
192 .bFunctionClass = USB_CLASS_COMM,
193 .bFunctionSubClass = USB_CDC_SUBCLASS_ETHERNET,
194 .bFunctionProtocol = USB_CDC_PROTO_NONE,
195 /* .iFunction = DYNAMIC */
196};
197
182/* full speed support: */ 198/* full speed support: */
183 199
184static struct usb_endpoint_descriptor fs_notify_desc __initdata = { 200static struct usb_endpoint_descriptor fs_notify_desc __initdata = {
@@ -208,11 +224,12 @@ static struct usb_endpoint_descriptor fs_out_desc __initdata = {
208}; 224};
209 225
210static struct usb_descriptor_header *eth_fs_function[] __initdata = { 226static struct usb_descriptor_header *eth_fs_function[] __initdata = {
227 (struct usb_descriptor_header *) &rndis_iad_descriptor,
211 /* control interface matches ACM, not Ethernet */ 228 /* control interface matches ACM, not Ethernet */
212 (struct usb_descriptor_header *) &rndis_control_intf, 229 (struct usb_descriptor_header *) &rndis_control_intf,
213 (struct usb_descriptor_header *) &header_desc, 230 (struct usb_descriptor_header *) &header_desc,
214 (struct usb_descriptor_header *) &call_mgmt_descriptor, 231 (struct usb_descriptor_header *) &call_mgmt_descriptor,
215 (struct usb_descriptor_header *) &acm_descriptor, 232 (struct usb_descriptor_header *) &rndis_acm_descriptor,
216 (struct usb_descriptor_header *) &rndis_union_desc, 233 (struct usb_descriptor_header *) &rndis_union_desc,
217 (struct usb_descriptor_header *) &fs_notify_desc, 234 (struct usb_descriptor_header *) &fs_notify_desc,
218 /* data interface has no altsetting */ 235 /* data interface has no altsetting */
@@ -252,11 +269,12 @@ static struct usb_endpoint_descriptor hs_out_desc __initdata = {
252}; 269};
253 270
254static struct usb_descriptor_header *eth_hs_function[] __initdata = { 271static struct usb_descriptor_header *eth_hs_function[] __initdata = {
272 (struct usb_descriptor_header *) &rndis_iad_descriptor,
255 /* control interface matches ACM, not Ethernet */ 273 /* control interface matches ACM, not Ethernet */
256 (struct usb_descriptor_header *) &rndis_control_intf, 274 (struct usb_descriptor_header *) &rndis_control_intf,
257 (struct usb_descriptor_header *) &header_desc, 275 (struct usb_descriptor_header *) &header_desc,
258 (struct usb_descriptor_header *) &call_mgmt_descriptor, 276 (struct usb_descriptor_header *) &call_mgmt_descriptor,
259 (struct usb_descriptor_header *) &acm_descriptor, 277 (struct usb_descriptor_header *) &rndis_acm_descriptor,
260 (struct usb_descriptor_header *) &rndis_union_desc, 278 (struct usb_descriptor_header *) &rndis_union_desc,
261 (struct usb_descriptor_header *) &hs_notify_desc, 279 (struct usb_descriptor_header *) &hs_notify_desc,
262 /* data interface has no altsetting */ 280 /* data interface has no altsetting */
@@ -271,6 +289,7 @@ static struct usb_descriptor_header *eth_hs_function[] __initdata = {
271static struct usb_string rndis_string_defs[] = { 289static struct usb_string rndis_string_defs[] = {
272 [0].s = "RNDIS Communications Control", 290 [0].s = "RNDIS Communications Control",
273 [1].s = "RNDIS Ethernet Data", 291 [1].s = "RNDIS Ethernet Data",
292 [2].s = "RNDIS",
274 { } /* end of list */ 293 { } /* end of list */
275}; 294};
276 295
@@ -587,6 +606,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
587 if (status < 0) 606 if (status < 0)
588 goto fail; 607 goto fail;
589 rndis->ctrl_id = status; 608 rndis->ctrl_id = status;
609 rndis_iad_descriptor.bFirstInterface = status;
590 610
591 rndis_control_intf.bInterfaceNumber = status; 611 rndis_control_intf.bInterfaceNumber = status;
592 rndis_union_desc.bMasterInterface0 = status; 612 rndis_union_desc.bMasterInterface0 = status;
@@ -798,6 +818,13 @@ int __init rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN])
798 return status; 818 return status;
799 rndis_string_defs[1].id = status; 819 rndis_string_defs[1].id = status;
800 rndis_data_intf.iInterface = status; 820 rndis_data_intf.iInterface = status;
821
822 /* IAD iFunction label */
823 status = usb_string_id(c->cdev);
824 if (status < 0)
825 return status;
826 rndis_string_defs[2].id = status;
827 rndis_iad_descriptor.iFunction = status;
801 } 828 }
802 829
803 /* allocate and initialize one new instance */ 830 /* allocate and initialize one new instance */
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 1e6aa504d58a..fca3407e48f2 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -248,8 +248,6 @@
248#include <linux/freezer.h> 248#include <linux/freezer.h>
249#include <linux/utsname.h> 249#include <linux/utsname.h>
250 250
251#include <asm/unaligned.h>
252
253#include <linux/usb/ch9.h> 251#include <linux/usb/ch9.h>
254#include <linux/usb/gadget.h> 252#include <linux/usb/gadget.h>
255 253
@@ -274,21 +272,20 @@
274#define DRIVER_NAME "g_file_storage" 272#define DRIVER_NAME "g_file_storage"
275#define DRIVER_VERSION "20 November 2008" 273#define DRIVER_VERSION "20 November 2008"
276 274
277static const char longname[] = DRIVER_DESC; 275static char fsg_string_manufacturer[64];
278static const char shortname[] = DRIVER_NAME; 276static const char fsg_string_product[] = DRIVER_DESC;
277static char fsg_string_serial[13];
278static const char fsg_string_config[] = "Self-powered";
279static const char fsg_string_interface[] = "Mass Storage";
280
281
282#include "storage_common.c"
283
279 284
280MODULE_DESCRIPTION(DRIVER_DESC); 285MODULE_DESCRIPTION(DRIVER_DESC);
281MODULE_AUTHOR("Alan Stern"); 286MODULE_AUTHOR("Alan Stern");
282MODULE_LICENSE("Dual BSD/GPL"); 287MODULE_LICENSE("Dual BSD/GPL");
283 288
284/* Thanks to NetChip Technologies for donating this product ID.
285 *
286 * DO NOT REUSE THESE IDs with any other driver!! Ever!!
287 * Instead: allocate your own, using normal USB-IF procedures. */
288#define DRIVER_VENDOR_ID 0x0525 // NetChip
289#define DRIVER_PRODUCT_ID 0xa4a5 // Linux-USB File-backed Storage Gadget
290
291
292/* 289/*
293 * This driver assumes self-powered hardware and has no way for users to 290 * This driver assumes self-powered hardware and has no way for users to
294 * trigger remote wakeup. It uses autoconfiguration to select endpoints 291 * trigger remote wakeup. It uses autoconfiguration to select endpoints
@@ -298,54 +295,12 @@ MODULE_LICENSE("Dual BSD/GPL");
298 295
299/*-------------------------------------------------------------------------*/ 296/*-------------------------------------------------------------------------*/
300 297
301#define LDBG(lun,fmt,args...) \
302 dev_dbg(&(lun)->dev , fmt , ## args)
303#define MDBG(fmt,args...) \
304 pr_debug(DRIVER_NAME ": " fmt , ## args)
305
306#ifndef DEBUG
307#undef VERBOSE_DEBUG
308#undef DUMP_MSGS
309#endif /* !DEBUG */
310
311#ifdef VERBOSE_DEBUG
312#define VLDBG LDBG
313#else
314#define VLDBG(lun,fmt,args...) \
315 do { } while (0)
316#endif /* VERBOSE_DEBUG */
317
318#define LERROR(lun,fmt,args...) \
319 dev_err(&(lun)->dev , fmt , ## args)
320#define LWARN(lun,fmt,args...) \
321 dev_warn(&(lun)->dev , fmt , ## args)
322#define LINFO(lun,fmt,args...) \
323 dev_info(&(lun)->dev , fmt , ## args)
324
325#define MINFO(fmt,args...) \
326 pr_info(DRIVER_NAME ": " fmt , ## args)
327
328#define DBG(d, fmt, args...) \
329 dev_dbg(&(d)->gadget->dev , fmt , ## args)
330#define VDBG(d, fmt, args...) \
331 dev_vdbg(&(d)->gadget->dev , fmt , ## args)
332#define ERROR(d, fmt, args...) \
333 dev_err(&(d)->gadget->dev , fmt , ## args)
334#define WARNING(d, fmt, args...) \
335 dev_warn(&(d)->gadget->dev , fmt , ## args)
336#define INFO(d, fmt, args...) \
337 dev_info(&(d)->gadget->dev , fmt , ## args)
338
339
340/*-------------------------------------------------------------------------*/
341 298
342/* Encapsulate the module parameter settings */ 299/* Encapsulate the module parameter settings */
343 300
344#define MAX_LUNS 8
345
346static struct { 301static struct {
347 char *file[MAX_LUNS]; 302 char *file[FSG_MAX_LUNS];
348 int ro[MAX_LUNS]; 303 int ro[FSG_MAX_LUNS];
349 unsigned int num_filenames; 304 unsigned int num_filenames;
350 unsigned int num_ros; 305 unsigned int num_ros;
351 unsigned int nluns; 306 unsigned int nluns;
@@ -372,8 +327,8 @@ static struct {
372 .removable = 0, 327 .removable = 0,
373 .can_stall = 1, 328 .can_stall = 1,
374 .cdrom = 0, 329 .cdrom = 0,
375 .vendor = DRIVER_VENDOR_ID, 330 .vendor = FSG_VENDOR_ID,
376 .product = DRIVER_PRODUCT_ID, 331 .product = FSG_PRODUCT_ID,
377 .release = 0xffff, // Use controller chip type 332 .release = 0xffff, // Use controller chip type
378 .buflen = 16384, 333 .buflen = 16384,
379 }; 334 };
@@ -425,125 +380,6 @@ MODULE_PARM_DESC(buflen, "I/O buffer size");
425#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 380#endif /* CONFIG_USB_FILE_STORAGE_TEST */
426 381
427 382
428/*-------------------------------------------------------------------------*/
429
430/* SCSI device types */
431#define TYPE_DISK 0x00
432#define TYPE_CDROM 0x05
433
434/* USB protocol value = the transport method */
435#define USB_PR_CBI 0x00 // Control/Bulk/Interrupt
436#define USB_PR_CB 0x01 // Control/Bulk w/o interrupt
437#define USB_PR_BULK 0x50 // Bulk-only
438
439/* USB subclass value = the protocol encapsulation */
440#define USB_SC_RBC 0x01 // Reduced Block Commands (flash)
441#define USB_SC_8020 0x02 // SFF-8020i, MMC-2, ATAPI (CD-ROM)
442#define USB_SC_QIC 0x03 // QIC-157 (tape)
443#define USB_SC_UFI 0x04 // UFI (floppy)
444#define USB_SC_8070 0x05 // SFF-8070i (removable)
445#define USB_SC_SCSI 0x06 // Transparent SCSI
446
447/* Bulk-only data structures */
448
449/* Command Block Wrapper */
450struct bulk_cb_wrap {
451 __le32 Signature; // Contains 'USBC'
452 u32 Tag; // Unique per command id
453 __le32 DataTransferLength; // Size of the data
454 u8 Flags; // Direction in bit 7
455 u8 Lun; // LUN (normally 0)
456 u8 Length; // Of the CDB, <= MAX_COMMAND_SIZE
457 u8 CDB[16]; // Command Data Block
458};
459
460#define USB_BULK_CB_WRAP_LEN 31
461#define USB_BULK_CB_SIG 0x43425355 // Spells out USBC
462#define USB_BULK_IN_FLAG 0x80
463
464/* Command Status Wrapper */
465struct bulk_cs_wrap {
466 __le32 Signature; // Should = 'USBS'
467 u32 Tag; // Same as original command
468 __le32 Residue; // Amount not transferred
469 u8 Status; // See below
470};
471
472#define USB_BULK_CS_WRAP_LEN 13
473#define USB_BULK_CS_SIG 0x53425355 // Spells out 'USBS'
474#define USB_STATUS_PASS 0
475#define USB_STATUS_FAIL 1
476#define USB_STATUS_PHASE_ERROR 2
477
478/* Bulk-only class specific requests */
479#define USB_BULK_RESET_REQUEST 0xff
480#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
481
482
483/* CBI Interrupt data structure */
484struct interrupt_data {
485 u8 bType;
486 u8 bValue;
487};
488
489#define CBI_INTERRUPT_DATA_LEN 2
490
491/* CBI Accept Device-Specific Command request */
492#define USB_CBI_ADSC_REQUEST 0x00
493
494
495#define MAX_COMMAND_SIZE 16 // Length of a SCSI Command Data Block
496
497/* SCSI commands that we recognize */
498#define SC_FORMAT_UNIT 0x04
499#define SC_INQUIRY 0x12
500#define SC_MODE_SELECT_6 0x15
501#define SC_MODE_SELECT_10 0x55
502#define SC_MODE_SENSE_6 0x1a
503#define SC_MODE_SENSE_10 0x5a
504#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
505#define SC_READ_6 0x08
506#define SC_READ_10 0x28
507#define SC_READ_12 0xa8
508#define SC_READ_CAPACITY 0x25
509#define SC_READ_FORMAT_CAPACITIES 0x23
510#define SC_READ_HEADER 0x44
511#define SC_READ_TOC 0x43
512#define SC_RELEASE 0x17
513#define SC_REQUEST_SENSE 0x03
514#define SC_RESERVE 0x16
515#define SC_SEND_DIAGNOSTIC 0x1d
516#define SC_START_STOP_UNIT 0x1b
517#define SC_SYNCHRONIZE_CACHE 0x35
518#define SC_TEST_UNIT_READY 0x00
519#define SC_VERIFY 0x2f
520#define SC_WRITE_6 0x0a
521#define SC_WRITE_10 0x2a
522#define SC_WRITE_12 0xaa
523
524/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
525#define SS_NO_SENSE 0
526#define SS_COMMUNICATION_FAILURE 0x040800
527#define SS_INVALID_COMMAND 0x052000
528#define SS_INVALID_FIELD_IN_CDB 0x052400
529#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
530#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
531#define SS_MEDIUM_NOT_PRESENT 0x023a00
532#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
533#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
534#define SS_RESET_OCCURRED 0x062900
535#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
536#define SS_UNRECOVERED_READ_ERROR 0x031100
537#define SS_WRITE_ERROR 0x030c02
538#define SS_WRITE_PROTECTED 0x072700
539
540#define SK(x) ((u8) ((x) >> 16)) // Sense Key byte, etc.
541#define ASC(x) ((u8) ((x) >> 8))
542#define ASCQ(x) ((u8) (x))
543
544
545/*-------------------------------------------------------------------------*/
546
547/* 383/*
548 * These definitions will permit the compiler to avoid generating code for 384 * These definitions will permit the compiler to avoid generating code for
549 * parts of the driver that aren't used in the non-TEST version. Even gcc 385 * parts of the driver that aren't used in the non-TEST version. Even gcc
@@ -566,81 +402,8 @@ struct interrupt_data {
566#endif /* CONFIG_USB_FILE_STORAGE_TEST */ 402#endif /* CONFIG_USB_FILE_STORAGE_TEST */
567 403
568 404
569struct lun { 405/*-------------------------------------------------------------------------*/
570 struct file *filp;
571 loff_t file_length;
572 loff_t num_sectors;
573
574 unsigned int ro : 1;
575 unsigned int prevent_medium_removal : 1;
576 unsigned int registered : 1;
577 unsigned int info_valid : 1;
578
579 u32 sense_data;
580 u32 sense_data_info;
581 u32 unit_attention_data;
582
583 struct device dev;
584};
585
586#define backing_file_is_open(curlun) ((curlun)->filp != NULL)
587
588static struct lun *dev_to_lun(struct device *dev)
589{
590 return container_of(dev, struct lun, dev);
591}
592
593
594/* Big enough to hold our biggest descriptor */
595#define EP0_BUFSIZE 256
596#define DELAYED_STATUS (EP0_BUFSIZE + 999) // An impossibly large value
597
598/* Number of buffers we will use. 2 is enough for double-buffering */
599#define NUM_BUFFERS 2
600
601enum fsg_buffer_state {
602 BUF_STATE_EMPTY = 0,
603 BUF_STATE_FULL,
604 BUF_STATE_BUSY
605};
606
607struct fsg_buffhd {
608 void *buf;
609 enum fsg_buffer_state state;
610 struct fsg_buffhd *next;
611
612 /* The NetChip 2280 is faster, and handles some protocol faults
613 * better, if we don't submit any short bulk-out read requests.
614 * So we will record the intended request length here. */
615 unsigned int bulk_out_intended_length;
616
617 struct usb_request *inreq;
618 int inreq_busy;
619 struct usb_request *outreq;
620 int outreq_busy;
621};
622
623enum fsg_state {
624 FSG_STATE_COMMAND_PHASE = -10, // This one isn't used anywhere
625 FSG_STATE_DATA_PHASE,
626 FSG_STATE_STATUS_PHASE,
627
628 FSG_STATE_IDLE = 0,
629 FSG_STATE_ABORT_BULK_OUT,
630 FSG_STATE_RESET,
631 FSG_STATE_INTERFACE_CHANGE,
632 FSG_STATE_CONFIG_CHANGE,
633 FSG_STATE_DISCONNECT,
634 FSG_STATE_EXIT,
635 FSG_STATE_TERMINATED
636};
637 406
638enum data_direction {
639 DATA_DIR_UNKNOWN = 0,
640 DATA_DIR_FROM_HOST,
641 DATA_DIR_TO_HOST,
642 DATA_DIR_NONE
643};
644 407
645struct fsg_dev { 408struct fsg_dev {
646 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */ 409 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
@@ -662,7 +425,7 @@ struct fsg_dev {
662 int intreq_busy; 425 int intreq_busy;
663 struct fsg_buffhd *intr_buffhd; 426 struct fsg_buffhd *intr_buffhd;
664 427
665 unsigned int bulk_out_maxpacket; 428 unsigned int bulk_out_maxpacket;
666 enum fsg_state state; // For exception handling 429 enum fsg_state state; // For exception handling
667 unsigned int exception_req_tag; 430 unsigned int exception_req_tag;
668 431
@@ -687,7 +450,7 @@ struct fsg_dev {
687 450
688 struct fsg_buffhd *next_buffhd_to_fill; 451 struct fsg_buffhd *next_buffhd_to_fill;
689 struct fsg_buffhd *next_buffhd_to_drain; 452 struct fsg_buffhd *next_buffhd_to_drain;
690 struct fsg_buffhd buffhds[NUM_BUFFERS]; 453 struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
691 454
692 int thread_wakeup_needed; 455 int thread_wakeup_needed;
693 struct completion thread_notifier; 456 struct completion thread_notifier;
@@ -712,8 +475,8 @@ struct fsg_dev {
712 u8 cbbuf_cmnd[MAX_COMMAND_SIZE]; 475 u8 cbbuf_cmnd[MAX_COMMAND_SIZE];
713 476
714 unsigned int nluns; 477 unsigned int nluns;
715 struct lun *luns; 478 struct fsg_lun *luns;
716 struct lun *curlun; 479 struct fsg_lun *curlun;
717}; 480};
718 481
719typedef void (*fsg_routine_t)(struct fsg_dev *); 482typedef void (*fsg_routine_t)(struct fsg_dev *);
@@ -739,49 +502,9 @@ static void set_bulk_out_req_length(struct fsg_dev *fsg,
739static struct fsg_dev *the_fsg; 502static struct fsg_dev *the_fsg;
740static struct usb_gadget_driver fsg_driver; 503static struct usb_gadget_driver fsg_driver;
741 504
742static void close_backing_file(struct lun *curlun);
743
744 505
745/*-------------------------------------------------------------------------*/ 506/*-------------------------------------------------------------------------*/
746 507
747#ifdef DUMP_MSGS
748
749static void dump_msg(struct fsg_dev *fsg, const char *label,
750 const u8 *buf, unsigned int length)
751{
752 if (length < 512) {
753 DBG(fsg, "%s, length %u:\n", label, length);
754 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
755 16, 1, buf, length, 0);
756 }
757}
758
759static void dump_cdb(struct fsg_dev *fsg)
760{}
761
762#else
763
764static void dump_msg(struct fsg_dev *fsg, const char *label,
765 const u8 *buf, unsigned int length)
766{}
767
768#ifdef VERBOSE_DEBUG
769
770static void dump_cdb(struct fsg_dev *fsg)
771{
772 print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE,
773 16, 1, fsg->cmnd, fsg->cmnd_size, 0);
774}
775
776#else
777
778static void dump_cdb(struct fsg_dev *fsg)
779{}
780
781#endif /* VERBOSE_DEBUG */
782#endif /* DUMP_MSGS */
783
784
785static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) 508static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
786{ 509{
787 const char *name; 510 const char *name;
@@ -799,26 +522,11 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
799 522
800/*-------------------------------------------------------------------------*/ 523/*-------------------------------------------------------------------------*/
801 524
802/* Routines for unaligned data access */
803
804static u32 get_unaligned_be24(u8 *buf)
805{
806 return 0xffffff & (u32) get_unaligned_be32(buf - 1);
807}
808
809
810/*-------------------------------------------------------------------------*/
811
812/* 525/*
813 * DESCRIPTORS ... most are static, but strings and (full) configuration 526 * DESCRIPTORS ... most are static, but strings and (full) configuration
814 * descriptors are built on demand. Also the (static) config and interface 527 * descriptors are built on demand. Also the (static) config and interface
815 * descriptors are adjusted during fsg_bind(). 528 * descriptors are adjusted during fsg_bind().
816 */ 529 */
817#define STRING_MANUFACTURER 1
818#define STRING_PRODUCT 2
819#define STRING_SERIAL 3
820#define STRING_CONFIG 4
821#define STRING_INTERFACE 5
822 530
823/* There is only one configuration. */ 531/* There is only one configuration. */
824#define CONFIG_VALUE 1 532#define CONFIG_VALUE 1
@@ -832,13 +540,13 @@ device_desc = {
832 .bDeviceClass = USB_CLASS_PER_INTERFACE, 540 .bDeviceClass = USB_CLASS_PER_INTERFACE,
833 541
834 /* The next three values can be overridden by module parameters */ 542 /* The next three values can be overridden by module parameters */
835 .idVendor = cpu_to_le16(DRIVER_VENDOR_ID), 543 .idVendor = cpu_to_le16(FSG_VENDOR_ID),
836 .idProduct = cpu_to_le16(DRIVER_PRODUCT_ID), 544 .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
837 .bcdDevice = cpu_to_le16(0xffff), 545 .bcdDevice = cpu_to_le16(0xffff),
838 546
839 .iManufacturer = STRING_MANUFACTURER, 547 .iManufacturer = FSG_STRING_MANUFACTURER,
840 .iProduct = STRING_PRODUCT, 548 .iProduct = FSG_STRING_PRODUCT,
841 .iSerialNumber = STRING_SERIAL, 549 .iSerialNumber = FSG_STRING_SERIAL,
842 .bNumConfigurations = 1, 550 .bNumConfigurations = 1,
843}; 551};
844 552
@@ -850,86 +558,12 @@ config_desc = {
850 /* wTotalLength computed by usb_gadget_config_buf() */ 558 /* wTotalLength computed by usb_gadget_config_buf() */
851 .bNumInterfaces = 1, 559 .bNumInterfaces = 1,
852 .bConfigurationValue = CONFIG_VALUE, 560 .bConfigurationValue = CONFIG_VALUE,
853 .iConfiguration = STRING_CONFIG, 561 .iConfiguration = FSG_STRING_CONFIG,
854 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER, 562 .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
855 .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2, 563 .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
856}; 564};
857 565
858static struct usb_otg_descriptor
859otg_desc = {
860 .bLength = sizeof(otg_desc),
861 .bDescriptorType = USB_DT_OTG,
862
863 .bmAttributes = USB_OTG_SRP,
864};
865
866/* There is only one interface. */
867 566
868static struct usb_interface_descriptor
869intf_desc = {
870 .bLength = sizeof intf_desc,
871 .bDescriptorType = USB_DT_INTERFACE,
872
873 .bNumEndpoints = 2, // Adjusted during fsg_bind()
874 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
875 .bInterfaceSubClass = USB_SC_SCSI, // Adjusted during fsg_bind()
876 .bInterfaceProtocol = USB_PR_BULK, // Adjusted during fsg_bind()
877 .iInterface = STRING_INTERFACE,
878};
879
880/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
881 * and interrupt-in. */
882
883static struct usb_endpoint_descriptor
884fs_bulk_in_desc = {
885 .bLength = USB_DT_ENDPOINT_SIZE,
886 .bDescriptorType = USB_DT_ENDPOINT,
887
888 .bEndpointAddress = USB_DIR_IN,
889 .bmAttributes = USB_ENDPOINT_XFER_BULK,
890 /* wMaxPacketSize set by autoconfiguration */
891};
892
893static struct usb_endpoint_descriptor
894fs_bulk_out_desc = {
895 .bLength = USB_DT_ENDPOINT_SIZE,
896 .bDescriptorType = USB_DT_ENDPOINT,
897
898 .bEndpointAddress = USB_DIR_OUT,
899 .bmAttributes = USB_ENDPOINT_XFER_BULK,
900 /* wMaxPacketSize set by autoconfiguration */
901};
902
903static struct usb_endpoint_descriptor
904fs_intr_in_desc = {
905 .bLength = USB_DT_ENDPOINT_SIZE,
906 .bDescriptorType = USB_DT_ENDPOINT,
907
908 .bEndpointAddress = USB_DIR_IN,
909 .bmAttributes = USB_ENDPOINT_XFER_INT,
910 .wMaxPacketSize = cpu_to_le16(2),
911 .bInterval = 32, // frames -> 32 ms
912};
913
914static const struct usb_descriptor_header *fs_function[] = {
915 (struct usb_descriptor_header *) &otg_desc,
916 (struct usb_descriptor_header *) &intf_desc,
917 (struct usb_descriptor_header *) &fs_bulk_in_desc,
918 (struct usb_descriptor_header *) &fs_bulk_out_desc,
919 (struct usb_descriptor_header *) &fs_intr_in_desc,
920 NULL,
921};
922#define FS_FUNCTION_PRE_EP_ENTRIES 2
923
924
925/*
926 * USB 2.0 devices need to expose both high speed and full speed
927 * descriptors, unless they only run at full speed.
928 *
929 * That means alternate endpoint descriptors (bigger packets)
930 * and a "device qualifier" ... plus more construction options
931 * for the config descriptor.
932 */
933static struct usb_qualifier_descriptor 567static struct usb_qualifier_descriptor
934dev_qualifier = { 568dev_qualifier = {
935 .bLength = sizeof dev_qualifier, 569 .bLength = sizeof dev_qualifier,
@@ -941,78 +575,6 @@ dev_qualifier = {
941 .bNumConfigurations = 1, 575 .bNumConfigurations = 1,
942}; 576};
943 577
944static struct usb_endpoint_descriptor
945hs_bulk_in_desc = {
946 .bLength = USB_DT_ENDPOINT_SIZE,
947 .bDescriptorType = USB_DT_ENDPOINT,
948
949 /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
950 .bmAttributes = USB_ENDPOINT_XFER_BULK,
951 .wMaxPacketSize = cpu_to_le16(512),
952};
953
954static struct usb_endpoint_descriptor
955hs_bulk_out_desc = {
956 .bLength = USB_DT_ENDPOINT_SIZE,
957 .bDescriptorType = USB_DT_ENDPOINT,
958
959 /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
960 .bmAttributes = USB_ENDPOINT_XFER_BULK,
961 .wMaxPacketSize = cpu_to_le16(512),
962 .bInterval = 1, // NAK every 1 uframe
963};
964
965static struct usb_endpoint_descriptor
966hs_intr_in_desc = {
967 .bLength = USB_DT_ENDPOINT_SIZE,
968 .bDescriptorType = USB_DT_ENDPOINT,
969
970 /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
971 .bmAttributes = USB_ENDPOINT_XFER_INT,
972 .wMaxPacketSize = cpu_to_le16(2),
973 .bInterval = 9, // 2**(9-1) = 256 uframes -> 32 ms
974};
975
976static const struct usb_descriptor_header *hs_function[] = {
977 (struct usb_descriptor_header *) &otg_desc,
978 (struct usb_descriptor_header *) &intf_desc,
979 (struct usb_descriptor_header *) &hs_bulk_in_desc,
980 (struct usb_descriptor_header *) &hs_bulk_out_desc,
981 (struct usb_descriptor_header *) &hs_intr_in_desc,
982 NULL,
983};
984#define HS_FUNCTION_PRE_EP_ENTRIES 2
985
986/* Maxpacket and other transfer characteristics vary by speed. */
987static struct usb_endpoint_descriptor *
988ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
989 struct usb_endpoint_descriptor *hs)
990{
991 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
992 return hs;
993 return fs;
994}
995
996
997/* The CBI specification limits the serial string to 12 uppercase hexadecimal
998 * characters. */
999static char manufacturer[64];
1000static char serial[13];
1001
1002/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
1003static struct usb_string strings[] = {
1004 {STRING_MANUFACTURER, manufacturer},
1005 {STRING_PRODUCT, longname},
1006 {STRING_SERIAL, serial},
1007 {STRING_CONFIG, "Self-powered"},
1008 {STRING_INTERFACE, "Mass Storage"},
1009 {}
1010};
1011
1012static struct usb_gadget_strings stringtab = {
1013 .language = 0x0409, // en-us
1014 .strings = strings,
1015};
1016 578
1017 579
1018/* 580/*
@@ -1032,10 +594,9 @@ static int populate_config_buf(struct usb_gadget *gadget,
1032 594
1033 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG) 595 if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
1034 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed; 596 speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
1035 if (gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH) 597 function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
1036 function = hs_function; 598 ? (const struct usb_descriptor_header **)fsg_hs_function
1037 else 599 : (const struct usb_descriptor_header **)fsg_fs_function;
1038 function = fs_function;
1039 600
1040 /* for now, don't advertise srp-only devices */ 601 /* for now, don't advertise srp-only devices */
1041 if (!gadget_is_otg(gadget)) 602 if (!gadget_is_otg(gadget))
@@ -1386,7 +947,7 @@ get_config:
1386 VDBG(fsg, "get string descriptor\n"); 947 VDBG(fsg, "get string descriptor\n");
1387 948
1388 /* wIndex == language code */ 949 /* wIndex == language code */
1389 value = usb_gadget_get_string(&stringtab, 950 value = usb_gadget_get_string(&fsg_stringtab,
1390 w_value & 0xff, req->buf); 951 w_value & 0xff, req->buf);
1391 break; 952 break;
1392 } 953 }
@@ -1551,7 +1112,7 @@ static int sleep_thread(struct fsg_dev *fsg)
1551 1112
1552static int do_read(struct fsg_dev *fsg) 1113static int do_read(struct fsg_dev *fsg)
1553{ 1114{
1554 struct lun *curlun = fsg->curlun; 1115 struct fsg_lun *curlun = fsg->curlun;
1555 u32 lba; 1116 u32 lba;
1556 struct fsg_buffhd *bh; 1117 struct fsg_buffhd *bh;
1557 int rc; 1118 int rc;
@@ -1677,7 +1238,7 @@ static int do_read(struct fsg_dev *fsg)
1677 1238
1678static int do_write(struct fsg_dev *fsg) 1239static int do_write(struct fsg_dev *fsg)
1679{ 1240{
1680 struct lun *curlun = fsg->curlun; 1241 struct fsg_lun *curlun = fsg->curlun;
1681 u32 lba; 1242 u32 lba;
1682 struct fsg_buffhd *bh; 1243 struct fsg_buffhd *bh;
1683 int get_some_more; 1244 int get_some_more;
@@ -1864,33 +1425,14 @@ static int do_write(struct fsg_dev *fsg)
1864 1425
1865/*-------------------------------------------------------------------------*/ 1426/*-------------------------------------------------------------------------*/
1866 1427
1867/* Sync the file data, don't bother with the metadata.
1868 * This code was copied from fs/buffer.c:sys_fdatasync(). */
1869static int fsync_sub(struct lun *curlun)
1870{
1871 struct file *filp = curlun->filp;
1872
1873 if (curlun->ro || !filp)
1874 return 0;
1875 return vfs_fsync(filp, filp->f_path.dentry, 1);
1876}
1877
1878static void fsync_all(struct fsg_dev *fsg)
1879{
1880 int i;
1881
1882 for (i = 0; i < fsg->nluns; ++i)
1883 fsync_sub(&fsg->luns[i]);
1884}
1885
1886static int do_synchronize_cache(struct fsg_dev *fsg) 1428static int do_synchronize_cache(struct fsg_dev *fsg)
1887{ 1429{
1888 struct lun *curlun = fsg->curlun; 1430 struct fsg_lun *curlun = fsg->curlun;
1889 int rc; 1431 int rc;
1890 1432
1891 /* We ignore the requested LBA and write out all file's 1433 /* We ignore the requested LBA and write out all file's
1892 * dirty data buffers. */ 1434 * dirty data buffers. */
1893 rc = fsync_sub(curlun); 1435 rc = fsg_lun_fsync_sub(curlun);
1894 if (rc) 1436 if (rc)
1895 curlun->sense_data = SS_WRITE_ERROR; 1437 curlun->sense_data = SS_WRITE_ERROR;
1896 return 0; 1438 return 0;
@@ -1899,7 +1441,7 @@ static int do_synchronize_cache(struct fsg_dev *fsg)
1899 1441
1900/*-------------------------------------------------------------------------*/ 1442/*-------------------------------------------------------------------------*/
1901 1443
1902static void invalidate_sub(struct lun *curlun) 1444static void invalidate_sub(struct fsg_lun *curlun)
1903{ 1445{
1904 struct file *filp = curlun->filp; 1446 struct file *filp = curlun->filp;
1905 struct inode *inode = filp->f_path.dentry->d_inode; 1447 struct inode *inode = filp->f_path.dentry->d_inode;
@@ -1911,7 +1453,7 @@ static void invalidate_sub(struct lun *curlun)
1911 1453
1912static int do_verify(struct fsg_dev *fsg) 1454static int do_verify(struct fsg_dev *fsg)
1913{ 1455{
1914 struct lun *curlun = fsg->curlun; 1456 struct fsg_lun *curlun = fsg->curlun;
1915 u32 lba; 1457 u32 lba;
1916 u32 verification_length; 1458 u32 verification_length;
1917 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill; 1459 struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
@@ -1944,7 +1486,7 @@ static int do_verify(struct fsg_dev *fsg)
1944 file_offset = ((loff_t) lba) << 9; 1486 file_offset = ((loff_t) lba) << 9;
1945 1487
1946 /* Write out all the dirty buffers before invalidating them */ 1488 /* Write out all the dirty buffers before invalidating them */
1947 fsync_sub(curlun); 1489 fsg_lun_fsync_sub(curlun);
1948 if (signal_pending(current)) 1490 if (signal_pending(current))
1949 return -EINTR; 1491 return -EINTR;
1950 1492
@@ -2041,7 +1583,7 @@ static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2041 1583
2042static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1584static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2043{ 1585{
2044 struct lun *curlun = fsg->curlun; 1586 struct fsg_lun *curlun = fsg->curlun;
2045 u8 *buf = (u8 *) bh->buf; 1587 u8 *buf = (u8 *) bh->buf;
2046 u32 sd, sdinfo; 1588 u32 sd, sdinfo;
2047 int valid; 1589 int valid;
@@ -2095,7 +1637,7 @@ static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2095 1637
2096static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1638static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2097{ 1639{
2098 struct lun *curlun = fsg->curlun; 1640 struct fsg_lun *curlun = fsg->curlun;
2099 u32 lba = get_unaligned_be32(&fsg->cmnd[2]); 1641 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
2100 int pmi = fsg->cmnd[8]; 1642 int pmi = fsg->cmnd[8];
2101 u8 *buf = (u8 *) bh->buf; 1643 u8 *buf = (u8 *) bh->buf;
@@ -2113,27 +1655,9 @@ static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2113} 1655}
2114 1656
2115 1657
2116static void store_cdrom_address(u8 *dest, int msf, u32 addr)
2117{
2118 if (msf) {
2119 /* Convert to Minutes-Seconds-Frames */
2120 addr >>= 2; /* Convert to 2048-byte frames */
2121 addr += 2*75; /* Lead-in occupies 2 seconds */
2122 dest[3] = addr % 75; /* Frames */
2123 addr /= 75;
2124 dest[2] = addr % 60; /* Seconds */
2125 addr /= 60;
2126 dest[1] = addr; /* Minutes */
2127 dest[0] = 0; /* Reserved */
2128 } else {
2129 /* Absolute sector */
2130 put_unaligned_be32(addr, dest);
2131 }
2132}
2133
2134static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1658static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2135{ 1659{
2136 struct lun *curlun = fsg->curlun; 1660 struct fsg_lun *curlun = fsg->curlun;
2137 int msf = fsg->cmnd[1] & 0x02; 1661 int msf = fsg->cmnd[1] & 0x02;
2138 u32 lba = get_unaligned_be32(&fsg->cmnd[2]); 1662 u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
2139 u8 *buf = (u8 *) bh->buf; 1663 u8 *buf = (u8 *) bh->buf;
@@ -2156,7 +1680,7 @@ static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2156 1680
2157static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1681static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2158{ 1682{
2159 struct lun *curlun = fsg->curlun; 1683 struct fsg_lun *curlun = fsg->curlun;
2160 int msf = fsg->cmnd[1] & 0x02; 1684 int msf = fsg->cmnd[1] & 0x02;
2161 int start_track = fsg->cmnd[6]; 1685 int start_track = fsg->cmnd[6];
2162 u8 *buf = (u8 *) bh->buf; 1686 u8 *buf = (u8 *) bh->buf;
@@ -2184,7 +1708,7 @@ static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2184 1708
2185static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1709static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2186{ 1710{
2187 struct lun *curlun = fsg->curlun; 1711 struct fsg_lun *curlun = fsg->curlun;
2188 int mscmnd = fsg->cmnd[0]; 1712 int mscmnd = fsg->cmnd[0];
2189 u8 *buf = (u8 *) bh->buf; 1713 u8 *buf = (u8 *) bh->buf;
2190 u8 *buf0 = buf; 1714 u8 *buf0 = buf;
@@ -2265,7 +1789,7 @@ static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2265 1789
2266static int do_start_stop(struct fsg_dev *fsg) 1790static int do_start_stop(struct fsg_dev *fsg)
2267{ 1791{
2268 struct lun *curlun = fsg->curlun; 1792 struct fsg_lun *curlun = fsg->curlun;
2269 int loej, start; 1793 int loej, start;
2270 1794
2271 if (!mod_data.removable) { 1795 if (!mod_data.removable) {
@@ -2295,7 +1819,7 @@ static int do_start_stop(struct fsg_dev *fsg)
2295 if (loej) { // Simulate an unload/eject 1819 if (loej) { // Simulate an unload/eject
2296 up_read(&fsg->filesem); 1820 up_read(&fsg->filesem);
2297 down_write(&fsg->filesem); 1821 down_write(&fsg->filesem);
2298 close_backing_file(curlun); 1822 fsg_lun_close(curlun);
2299 up_write(&fsg->filesem); 1823 up_write(&fsg->filesem);
2300 down_read(&fsg->filesem); 1824 down_read(&fsg->filesem);
2301 } 1825 }
@@ -2303,7 +1827,7 @@ static int do_start_stop(struct fsg_dev *fsg)
2303 1827
2304 /* Our emulation doesn't support mounting; the medium is 1828 /* Our emulation doesn't support mounting; the medium is
2305 * available for use as soon as it is loaded. */ 1829 * available for use as soon as it is loaded. */
2306 if (!backing_file_is_open(curlun)) { 1830 if (!fsg_lun_is_open(curlun)) {
2307 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 1831 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2308 return -EINVAL; 1832 return -EINVAL;
2309 } 1833 }
@@ -2315,7 +1839,7 @@ static int do_start_stop(struct fsg_dev *fsg)
2315 1839
2316static int do_prevent_allow(struct fsg_dev *fsg) 1840static int do_prevent_allow(struct fsg_dev *fsg)
2317{ 1841{
2318 struct lun *curlun = fsg->curlun; 1842 struct fsg_lun *curlun = fsg->curlun;
2319 int prevent; 1843 int prevent;
2320 1844
2321 if (!mod_data.removable) { 1845 if (!mod_data.removable) {
@@ -2330,7 +1854,7 @@ static int do_prevent_allow(struct fsg_dev *fsg)
2330 } 1854 }
2331 1855
2332 if (curlun->prevent_medium_removal && !prevent) 1856 if (curlun->prevent_medium_removal && !prevent)
2333 fsync_sub(curlun); 1857 fsg_lun_fsync_sub(curlun);
2334 curlun->prevent_medium_removal = prevent; 1858 curlun->prevent_medium_removal = prevent;
2335 return 0; 1859 return 0;
2336} 1860}
@@ -2339,7 +1863,7 @@ static int do_prevent_allow(struct fsg_dev *fsg)
2339static int do_read_format_capacities(struct fsg_dev *fsg, 1863static int do_read_format_capacities(struct fsg_dev *fsg,
2340 struct fsg_buffhd *bh) 1864 struct fsg_buffhd *bh)
2341{ 1865{
2342 struct lun *curlun = fsg->curlun; 1866 struct fsg_lun *curlun = fsg->curlun;
2343 u8 *buf = (u8 *) bh->buf; 1867 u8 *buf = (u8 *) bh->buf;
2344 1868
2345 buf[0] = buf[1] = buf[2] = 0; 1869 buf[0] = buf[1] = buf[2] = 0;
@@ -2356,7 +1880,7 @@ static int do_read_format_capacities(struct fsg_dev *fsg,
2356 1880
2357static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh) 1881static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2358{ 1882{
2359 struct lun *curlun = fsg->curlun; 1883 struct fsg_lun *curlun = fsg->curlun;
2360 1884
2361 /* We don't support MODE SELECT */ 1885 /* We don't support MODE SELECT */
2362 curlun->sense_data = SS_INVALID_COMMAND; 1886 curlun->sense_data = SS_INVALID_COMMAND;
@@ -2599,7 +2123,7 @@ static int finish_reply(struct fsg_dev *fsg)
2599 2123
2600static int send_status(struct fsg_dev *fsg) 2124static int send_status(struct fsg_dev *fsg)
2601{ 2125{
2602 struct lun *curlun = fsg->curlun; 2126 struct fsg_lun *curlun = fsg->curlun;
2603 struct fsg_buffhd *bh; 2127 struct fsg_buffhd *bh;
2604 int rc; 2128 int rc;
2605 u8 status = USB_STATUS_PASS; 2129 u8 status = USB_STATUS_PASS;
@@ -2691,7 +2215,7 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
2691 int lun = fsg->cmnd[1] >> 5; 2215 int lun = fsg->cmnd[1] >> 5;
2692 static const char dirletter[4] = {'u', 'o', 'i', 'n'}; 2216 static const char dirletter[4] = {'u', 'o', 'i', 'n'};
2693 char hdlen[20]; 2217 char hdlen[20];
2694 struct lun *curlun; 2218 struct fsg_lun *curlun;
2695 2219
2696 /* Adjust the expected cmnd_size for protocol encapsulation padding. 2220 /* Adjust the expected cmnd_size for protocol encapsulation padding.
2697 * Transparent SCSI doesn't pad. */ 2221 * Transparent SCSI doesn't pad. */
@@ -2820,7 +2344,7 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
2820 2344
2821 /* If the medium isn't mounted and the command needs to access 2345 /* If the medium isn't mounted and the command needs to access
2822 * it, return an error. */ 2346 * it, return an error. */
2823 if (curlun && !backing_file_is_open(curlun) && needs_medium) { 2347 if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
2824 curlun->sense_data = SS_MEDIUM_NOT_PRESENT; 2348 curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2825 return -EINVAL; 2349 return -EINVAL;
2826 } 2350 }
@@ -3075,8 +2599,8 @@ static int do_scsi_command(struct fsg_dev *fsg)
3075 2599
3076static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh) 2600static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
3077{ 2601{
3078 struct usb_request *req = bh->outreq; 2602 struct usb_request *req = bh->outreq;
3079 struct bulk_cb_wrap *cbw = req->buf; 2603 struct fsg_bulk_cb_wrap *cbw = req->buf;
3080 2604
3081 /* Was this a real packet? Should it be ignored? */ 2605 /* Was this a real packet? Should it be ignored? */
3082 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags)) 2606 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
@@ -3105,7 +2629,7 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
3105 } 2629 }
3106 2630
3107 /* Is the CBW meaningful? */ 2631 /* Is the CBW meaningful? */
3108 if (cbw->Lun >= MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG || 2632 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
3109 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) { 2633 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
3110 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, " 2634 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
3111 "cmdlen %u\n", 2635 "cmdlen %u\n",
@@ -3238,7 +2762,7 @@ static int do_set_interface(struct fsg_dev *fsg, int altsetting)
3238 2762
3239reset: 2763reset:
3240 /* Deallocate the requests */ 2764 /* Deallocate the requests */
3241 for (i = 0; i < NUM_BUFFERS; ++i) { 2765 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
3242 struct fsg_buffhd *bh = &fsg->buffhds[i]; 2766 struct fsg_buffhd *bh = &fsg->buffhds[i];
3243 2767
3244 if (bh->inreq) { 2768 if (bh->inreq) {
@@ -3276,12 +2800,14 @@ reset:
3276 DBG(fsg, "set interface %d\n", altsetting); 2800 DBG(fsg, "set interface %d\n", altsetting);
3277 2801
3278 /* Enable the endpoints */ 2802 /* Enable the endpoints */
3279 d = ep_desc(fsg->gadget, &fs_bulk_in_desc, &hs_bulk_in_desc); 2803 d = fsg_ep_desc(fsg->gadget,
2804 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
3280 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0) 2805 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
3281 goto reset; 2806 goto reset;
3282 fsg->bulk_in_enabled = 1; 2807 fsg->bulk_in_enabled = 1;
3283 2808
3284 d = ep_desc(fsg->gadget, &fs_bulk_out_desc, &hs_bulk_out_desc); 2809 d = fsg_ep_desc(fsg->gadget,
2810 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
3285 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0) 2811 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
3286 goto reset; 2812 goto reset;
3287 fsg->bulk_out_enabled = 1; 2813 fsg->bulk_out_enabled = 1;
@@ -3289,14 +2815,15 @@ reset:
3289 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags); 2815 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
3290 2816
3291 if (transport_is_cbi()) { 2817 if (transport_is_cbi()) {
3292 d = ep_desc(fsg->gadget, &fs_intr_in_desc, &hs_intr_in_desc); 2818 d = fsg_ep_desc(fsg->gadget,
2819 &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
3293 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0) 2820 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
3294 goto reset; 2821 goto reset;
3295 fsg->intr_in_enabled = 1; 2822 fsg->intr_in_enabled = 1;
3296 } 2823 }
3297 2824
3298 /* Allocate the requests */ 2825 /* Allocate the requests */
3299 for (i = 0; i < NUM_BUFFERS; ++i) { 2826 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
3300 struct fsg_buffhd *bh = &fsg->buffhds[i]; 2827 struct fsg_buffhd *bh = &fsg->buffhds[i];
3301 2828
3302 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0) 2829 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
@@ -3372,7 +2899,7 @@ static void handle_exception(struct fsg_dev *fsg)
3372 struct fsg_buffhd *bh; 2899 struct fsg_buffhd *bh;
3373 enum fsg_state old_state; 2900 enum fsg_state old_state;
3374 u8 new_config; 2901 u8 new_config;
3375 struct lun *curlun; 2902 struct fsg_lun *curlun;
3376 unsigned int exception_req_tag; 2903 unsigned int exception_req_tag;
3377 int rc; 2904 int rc;
3378 2905
@@ -3392,7 +2919,7 @@ static void handle_exception(struct fsg_dev *fsg)
3392 /* Cancel all the pending transfers */ 2919 /* Cancel all the pending transfers */
3393 if (fsg->intreq_busy) 2920 if (fsg->intreq_busy)
3394 usb_ep_dequeue(fsg->intr_in, fsg->intreq); 2921 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
3395 for (i = 0; i < NUM_BUFFERS; ++i) { 2922 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
3396 bh = &fsg->buffhds[i]; 2923 bh = &fsg->buffhds[i];
3397 if (bh->inreq_busy) 2924 if (bh->inreq_busy)
3398 usb_ep_dequeue(fsg->bulk_in, bh->inreq); 2925 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
@@ -3403,7 +2930,7 @@ static void handle_exception(struct fsg_dev *fsg)
3403 /* Wait until everything is idle */ 2930 /* Wait until everything is idle */
3404 for (;;) { 2931 for (;;) {
3405 num_active = fsg->intreq_busy; 2932 num_active = fsg->intreq_busy;
3406 for (i = 0; i < NUM_BUFFERS; ++i) { 2933 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
3407 bh = &fsg->buffhds[i]; 2934 bh = &fsg->buffhds[i];
3408 num_active += bh->inreq_busy + bh->outreq_busy; 2935 num_active += bh->inreq_busy + bh->outreq_busy;
3409 } 2936 }
@@ -3425,7 +2952,7 @@ static void handle_exception(struct fsg_dev *fsg)
3425 * state, and the exception. Then invoke the handler. */ 2952 * state, and the exception. Then invoke the handler. */
3426 spin_lock_irq(&fsg->lock); 2953 spin_lock_irq(&fsg->lock);
3427 2954
3428 for (i = 0; i < NUM_BUFFERS; ++i) { 2955 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
3429 bh = &fsg->buffhds[i]; 2956 bh = &fsg->buffhds[i];
3430 bh->state = BUF_STATE_EMPTY; 2957 bh->state = BUF_STATE_EMPTY;
3431 } 2958 }
@@ -3506,7 +3033,8 @@ static void handle_exception(struct fsg_dev *fsg)
3506 break; 3033 break;
3507 3034
3508 case FSG_STATE_DISCONNECT: 3035 case FSG_STATE_DISCONNECT:
3509 fsync_all(fsg); 3036 for (i = 0; i < fsg->nluns; ++i)
3037 fsg_lun_fsync_sub(fsg->luns + i);
3510 do_set_config(fsg, 0); // Unconfigured state 3038 do_set_config(fsg, 0); // Unconfigured state
3511 break; 3039 break;
3512 3040
@@ -3595,201 +3123,10 @@ static int fsg_main_thread(void *fsg_)
3595 3123
3596/*-------------------------------------------------------------------------*/ 3124/*-------------------------------------------------------------------------*/
3597 3125
3598/* If the next two routines are called while the gadget is registered,
3599 * the caller must own fsg->filesem for writing. */
3600
3601static int open_backing_file(struct lun *curlun, const char *filename)
3602{
3603 int ro;
3604 struct file *filp = NULL;
3605 int rc = -EINVAL;
3606 struct inode *inode = NULL;
3607 loff_t size;
3608 loff_t num_sectors;
3609 loff_t min_sectors;
3610
3611 /* R/W if we can, R/O if we must */
3612 ro = curlun->ro;
3613 if (!ro) {
3614 filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
3615 if (-EROFS == PTR_ERR(filp))
3616 ro = 1;
3617 }
3618 if (ro)
3619 filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
3620 if (IS_ERR(filp)) {
3621 LINFO(curlun, "unable to open backing file: %s\n", filename);
3622 return PTR_ERR(filp);
3623 }
3624
3625 if (!(filp->f_mode & FMODE_WRITE))
3626 ro = 1;
3627
3628 if (filp->f_path.dentry)
3629 inode = filp->f_path.dentry->d_inode;
3630 if (inode && S_ISBLK(inode->i_mode)) {
3631 if (bdev_read_only(inode->i_bdev))
3632 ro = 1;
3633 } else if (!inode || !S_ISREG(inode->i_mode)) {
3634 LINFO(curlun, "invalid file type: %s\n", filename);
3635 goto out;
3636 }
3637
3638 /* If we can't read the file, it's no good.
3639 * If we can't write the file, use it read-only. */
3640 if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
3641 LINFO(curlun, "file not readable: %s\n", filename);
3642 goto out;
3643 }
3644 if (!(filp->f_op->write || filp->f_op->aio_write))
3645 ro = 1;
3646
3647 size = i_size_read(inode->i_mapping->host);
3648 if (size < 0) {
3649 LINFO(curlun, "unable to find file size: %s\n", filename);
3650 rc = (int) size;
3651 goto out;
3652 }
3653 num_sectors = size >> 9; // File size in 512-byte blocks
3654 min_sectors = 1;
3655 if (mod_data.cdrom) {
3656 num_sectors &= ~3; // Reduce to a multiple of 2048
3657 min_sectors = 300*4; // Smallest track is 300 frames
3658 if (num_sectors >= 256*60*75*4) {
3659 num_sectors = (256*60*75 - 1) * 4;
3660 LINFO(curlun, "file too big: %s\n", filename);
3661 LINFO(curlun, "using only first %d blocks\n",
3662 (int) num_sectors);
3663 }
3664 }
3665 if (num_sectors < min_sectors) {
3666 LINFO(curlun, "file too small: %s\n", filename);
3667 rc = -ETOOSMALL;
3668 goto out;
3669 }
3670
3671 get_file(filp);
3672 curlun->ro = ro;
3673 curlun->filp = filp;
3674 curlun->file_length = size;
3675 curlun->num_sectors = num_sectors;
3676 LDBG(curlun, "open backing file: %s\n", filename);
3677 rc = 0;
3678
3679out:
3680 filp_close(filp, current->files);
3681 return rc;
3682}
3683
3684
3685static void close_backing_file(struct lun *curlun)
3686{
3687 if (curlun->filp) {
3688 LDBG(curlun, "close backing file\n");
3689 fput(curlun->filp);
3690 curlun->filp = NULL;
3691 }
3692}
3693
3694
3695static ssize_t show_ro(struct device *dev, struct device_attribute *attr, char *buf)
3696{
3697 struct lun *curlun = dev_to_lun(dev);
3698
3699 return sprintf(buf, "%d\n", curlun->ro);
3700}
3701
3702static ssize_t show_file(struct device *dev, struct device_attribute *attr,
3703 char *buf)
3704{
3705 struct lun *curlun = dev_to_lun(dev);
3706 struct fsg_dev *fsg = dev_get_drvdata(dev);
3707 char *p;
3708 ssize_t rc;
3709
3710 down_read(&fsg->filesem);
3711 if (backing_file_is_open(curlun)) { // Get the complete pathname
3712 p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
3713 if (IS_ERR(p))
3714 rc = PTR_ERR(p);
3715 else {
3716 rc = strlen(p);
3717 memmove(buf, p, rc);
3718 buf[rc] = '\n'; // Add a newline
3719 buf[++rc] = 0;
3720 }
3721 } else { // No file, return 0 bytes
3722 *buf = 0;
3723 rc = 0;
3724 }
3725 up_read(&fsg->filesem);
3726 return rc;
3727}
3728
3729
3730static ssize_t store_ro(struct device *dev, struct device_attribute *attr,
3731 const char *buf, size_t count)
3732{
3733 ssize_t rc = count;
3734 struct lun *curlun = dev_to_lun(dev);
3735 struct fsg_dev *fsg = dev_get_drvdata(dev);
3736 int i;
3737
3738 if (sscanf(buf, "%d", &i) != 1)
3739 return -EINVAL;
3740
3741 /* Allow the write-enable status to change only while the backing file
3742 * is closed. */
3743 down_read(&fsg->filesem);
3744 if (backing_file_is_open(curlun)) {
3745 LDBG(curlun, "read-only status change prevented\n");
3746 rc = -EBUSY;
3747 } else {
3748 curlun->ro = !!i;
3749 LDBG(curlun, "read-only status set to %d\n", curlun->ro);
3750 }
3751 up_read(&fsg->filesem);
3752 return rc;
3753}
3754
3755static ssize_t store_file(struct device *dev, struct device_attribute *attr,
3756 const char *buf, size_t count)
3757{
3758 struct lun *curlun = dev_to_lun(dev);
3759 struct fsg_dev *fsg = dev_get_drvdata(dev);
3760 int rc = 0;
3761
3762 if (curlun->prevent_medium_removal && backing_file_is_open(curlun)) {
3763 LDBG(curlun, "eject attempt prevented\n");
3764 return -EBUSY; // "Door is locked"
3765 }
3766
3767 /* Remove a trailing newline */
3768 if (count > 0 && buf[count-1] == '\n')
3769 ((char *) buf)[count-1] = 0; // Ugh!
3770
3771 /* Eject current medium */
3772 down_write(&fsg->filesem);
3773 if (backing_file_is_open(curlun)) {
3774 close_backing_file(curlun);
3775 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
3776 }
3777
3778 /* Load new medium */
3779 if (count > 0 && buf[0]) {
3780 rc = open_backing_file(curlun, buf);
3781 if (rc == 0)
3782 curlun->unit_attention_data =
3783 SS_NOT_READY_TO_READY_TRANSITION;
3784 }
3785 up_write(&fsg->filesem);
3786 return (rc < 0 ? rc : count);
3787}
3788
3789 3126
3790/* The write permissions and store_xxx pointers are set in fsg_bind() */ 3127/* The write permissions and store_xxx pointers are set in fsg_bind() */
3791static DEVICE_ATTR(ro, 0444, show_ro, NULL); 3128static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
3792static DEVICE_ATTR(file, 0444, show_file, NULL); 3129static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
3793 3130
3794 3131
3795/*-------------------------------------------------------------------------*/ 3132/*-------------------------------------------------------------------------*/
@@ -3804,7 +3141,9 @@ static void fsg_release(struct kref *ref)
3804 3141
3805static void lun_release(struct device *dev) 3142static void lun_release(struct device *dev)
3806{ 3143{
3807 struct fsg_dev *fsg = dev_get_drvdata(dev); 3144 struct rw_semaphore *filesem = dev_get_drvdata(dev);
3145 struct fsg_dev *fsg =
3146 container_of(filesem, struct fsg_dev, filesem);
3808 3147
3809 kref_put(&fsg->ref, fsg_release); 3148 kref_put(&fsg->ref, fsg_release);
3810} 3149}
@@ -3813,7 +3152,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
3813{ 3152{
3814 struct fsg_dev *fsg = get_gadget_data(gadget); 3153 struct fsg_dev *fsg = get_gadget_data(gadget);
3815 int i; 3154 int i;
3816 struct lun *curlun; 3155 struct fsg_lun *curlun;
3817 struct usb_request *req = fsg->ep0req; 3156 struct usb_request *req = fsg->ep0req;
3818 3157
3819 DBG(fsg, "unbind\n"); 3158 DBG(fsg, "unbind\n");
@@ -3825,7 +3164,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
3825 if (curlun->registered) { 3164 if (curlun->registered) {
3826 device_remove_file(&curlun->dev, &dev_attr_ro); 3165 device_remove_file(&curlun->dev, &dev_attr_ro);
3827 device_remove_file(&curlun->dev, &dev_attr_file); 3166 device_remove_file(&curlun->dev, &dev_attr_file);
3828 close_backing_file(curlun); 3167 fsg_lun_close(curlun);
3829 device_unregister(&curlun->dev); 3168 device_unregister(&curlun->dev);
3830 curlun->registered = 0; 3169 curlun->registered = 0;
3831 } 3170 }
@@ -3841,7 +3180,7 @@ static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
3841 } 3180 }
3842 3181
3843 /* Free the data buffers */ 3182 /* Free the data buffers */
3844 for (i = 0; i < NUM_BUFFERS; ++i) 3183 for (i = 0; i < FSG_NUM_BUFFERS; ++i)
3845 kfree(fsg->buffhds[i].buf); 3184 kfree(fsg->buffhds[i].buf);
3846 3185
3847 /* Free the request and buffer for endpoint 0 */ 3186 /* Free the request and buffer for endpoint 0 */
@@ -3948,7 +3287,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3948 struct fsg_dev *fsg = the_fsg; 3287 struct fsg_dev *fsg = the_fsg;
3949 int rc; 3288 int rc;
3950 int i; 3289 int i;
3951 struct lun *curlun; 3290 struct fsg_lun *curlun;
3952 struct usb_ep *ep; 3291 struct usb_ep *ep;
3953 struct usb_request *req; 3292 struct usb_request *req;
3954 char *pathbuf, *p; 3293 char *pathbuf, *p;
@@ -3963,10 +3302,10 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3963 3302
3964 if (mod_data.removable) { // Enable the store_xxx attributes 3303 if (mod_data.removable) { // Enable the store_xxx attributes
3965 dev_attr_file.attr.mode = 0644; 3304 dev_attr_file.attr.mode = 0644;
3966 dev_attr_file.store = store_file; 3305 dev_attr_file.store = fsg_store_file;
3967 if (!mod_data.cdrom) { 3306 if (!mod_data.cdrom) {
3968 dev_attr_ro.attr.mode = 0644; 3307 dev_attr_ro.attr.mode = 0644;
3969 dev_attr_ro.store = store_ro; 3308 dev_attr_ro.store = fsg_store_ro;
3970 } 3309 }
3971 } 3310 }
3972 3311
@@ -3974,7 +3313,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3974 i = mod_data.nluns; 3313 i = mod_data.nluns;
3975 if (i == 0) 3314 if (i == 0)
3976 i = max(mod_data.num_filenames, 1u); 3315 i = max(mod_data.num_filenames, 1u);
3977 if (i > MAX_LUNS) { 3316 if (i > FSG_MAX_LUNS) {
3978 ERROR(fsg, "invalid number of LUNs: %d\n", i); 3317 ERROR(fsg, "invalid number of LUNs: %d\n", i);
3979 rc = -EINVAL; 3318 rc = -EINVAL;
3980 goto out; 3319 goto out;
@@ -3982,7 +3321,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3982 3321
3983 /* Create the LUNs, open their backing files, and register the 3322 /* Create the LUNs, open their backing files, and register the
3984 * LUN devices in sysfs. */ 3323 * LUN devices in sysfs. */
3985 fsg->luns = kzalloc(i * sizeof(struct lun), GFP_KERNEL); 3324 fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
3986 if (!fsg->luns) { 3325 if (!fsg->luns) {
3987 rc = -ENOMEM; 3326 rc = -ENOMEM;
3988 goto out; 3327 goto out;
@@ -3991,13 +3330,14 @@ static int __init fsg_bind(struct usb_gadget *gadget)
3991 3330
3992 for (i = 0; i < fsg->nluns; ++i) { 3331 for (i = 0; i < fsg->nluns; ++i) {
3993 curlun = &fsg->luns[i]; 3332 curlun = &fsg->luns[i];
3994 curlun->ro = mod_data.ro[i]; 3333 curlun->cdrom = !!mod_data.cdrom;
3995 if (mod_data.cdrom) 3334 curlun->ro = mod_data.cdrom || mod_data.ro[i];
3996 curlun->ro = 1; 3335 curlun->initially_ro = curlun->ro;
3336 curlun->removable = mod_data.removable;
3997 curlun->dev.release = lun_release; 3337 curlun->dev.release = lun_release;
3998 curlun->dev.parent = &gadget->dev; 3338 curlun->dev.parent = &gadget->dev;
3999 curlun->dev.driver = &fsg_driver.driver; 3339 curlun->dev.driver = &fsg_driver.driver;
4000 dev_set_drvdata(&curlun->dev, fsg); 3340 dev_set_drvdata(&curlun->dev, &fsg->filesem);
4001 dev_set_name(&curlun->dev,"%s-lun%d", 3341 dev_set_name(&curlun->dev,"%s-lun%d",
4002 dev_name(&gadget->dev), i); 3342 dev_name(&gadget->dev), i);
4003 3343
@@ -4016,7 +3356,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
4016 kref_get(&fsg->ref); 3356 kref_get(&fsg->ref);
4017 3357
4018 if (mod_data.file[i] && *mod_data.file[i]) { 3358 if (mod_data.file[i] && *mod_data.file[i]) {
4019 if ((rc = open_backing_file(curlun, 3359 if ((rc = fsg_lun_open(curlun,
4020 mod_data.file[i])) != 0) 3360 mod_data.file[i])) != 0)
4021 goto out; 3361 goto out;
4022 } else if (!mod_data.removable) { 3362 } else if (!mod_data.removable) {
@@ -4028,20 +3368,20 @@ static int __init fsg_bind(struct usb_gadget *gadget)
4028 3368
4029 /* Find all the endpoints we will use */ 3369 /* Find all the endpoints we will use */
4030 usb_ep_autoconfig_reset(gadget); 3370 usb_ep_autoconfig_reset(gadget);
4031 ep = usb_ep_autoconfig(gadget, &fs_bulk_in_desc); 3371 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
4032 if (!ep) 3372 if (!ep)
4033 goto autoconf_fail; 3373 goto autoconf_fail;
4034 ep->driver_data = fsg; // claim the endpoint 3374 ep->driver_data = fsg; // claim the endpoint
4035 fsg->bulk_in = ep; 3375 fsg->bulk_in = ep;
4036 3376
4037 ep = usb_ep_autoconfig(gadget, &fs_bulk_out_desc); 3377 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
4038 if (!ep) 3378 if (!ep)
4039 goto autoconf_fail; 3379 goto autoconf_fail;
4040 ep->driver_data = fsg; // claim the endpoint 3380 ep->driver_data = fsg; // claim the endpoint
4041 fsg->bulk_out = ep; 3381 fsg->bulk_out = ep;
4042 3382
4043 if (transport_is_cbi()) { 3383 if (transport_is_cbi()) {
4044 ep = usb_ep_autoconfig(gadget, &fs_intr_in_desc); 3384 ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
4045 if (!ep) 3385 if (!ep)
4046 goto autoconf_fail; 3386 goto autoconf_fail;
4047 ep->driver_data = fsg; // claim the endpoint 3387 ep->driver_data = fsg; // claim the endpoint
@@ -4055,28 +3395,28 @@ static int __init fsg_bind(struct usb_gadget *gadget)
4055 device_desc.bcdDevice = cpu_to_le16(mod_data.release); 3395 device_desc.bcdDevice = cpu_to_le16(mod_data.release);
4056 3396
4057 i = (transport_is_cbi() ? 3 : 2); // Number of endpoints 3397 i = (transport_is_cbi() ? 3 : 2); // Number of endpoints
4058 intf_desc.bNumEndpoints = i; 3398 fsg_intf_desc.bNumEndpoints = i;
4059 intf_desc.bInterfaceSubClass = mod_data.protocol_type; 3399 fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
4060 intf_desc.bInterfaceProtocol = mod_data.transport_type; 3400 fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
4061 fs_function[i + FS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3401 fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
4062 3402
4063 if (gadget_is_dualspeed(gadget)) { 3403 if (gadget_is_dualspeed(gadget)) {
4064 hs_function[i + HS_FUNCTION_PRE_EP_ENTRIES] = NULL; 3404 fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
4065 3405
4066 /* Assume ep0 uses the same maxpacket value for both speeds */ 3406 /* Assume ep0 uses the same maxpacket value for both speeds */
4067 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket; 3407 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
4068 3408
4069 /* Assume endpoint addresses are the same for both speeds */ 3409 /* Assume endpoint addresses are the same for both speeds */
4070 hs_bulk_in_desc.bEndpointAddress = 3410 fsg_hs_bulk_in_desc.bEndpointAddress =
4071 fs_bulk_in_desc.bEndpointAddress; 3411 fsg_fs_bulk_in_desc.bEndpointAddress;
4072 hs_bulk_out_desc.bEndpointAddress = 3412 fsg_hs_bulk_out_desc.bEndpointAddress =
4073 fs_bulk_out_desc.bEndpointAddress; 3413 fsg_fs_bulk_out_desc.bEndpointAddress;
4074 hs_intr_in_desc.bEndpointAddress = 3414 fsg_hs_intr_in_desc.bEndpointAddress =
4075 fs_intr_in_desc.bEndpointAddress; 3415 fsg_fs_intr_in_desc.bEndpointAddress;
4076 } 3416 }
4077 3417
4078 if (gadget_is_otg(gadget)) 3418 if (gadget_is_otg(gadget))
4079 otg_desc.bmAttributes |= USB_OTG_HNP; 3419 fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
4080 3420
4081 rc = -ENOMEM; 3421 rc = -ENOMEM;
4082 3422
@@ -4090,7 +3430,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
4090 req->complete = ep0_complete; 3430 req->complete = ep0_complete;
4091 3431
4092 /* Allocate the data buffers */ 3432 /* Allocate the data buffers */
4093 for (i = 0; i < NUM_BUFFERS; ++i) { 3433 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
4094 struct fsg_buffhd *bh = &fsg->buffhds[i]; 3434 struct fsg_buffhd *bh = &fsg->buffhds[i];
4095 3435
4096 /* Allocate for the bulk-in endpoint. We assume that 3436 /* Allocate for the bulk-in endpoint. We assume that
@@ -4101,23 +3441,24 @@ static int __init fsg_bind(struct usb_gadget *gadget)
4101 goto out; 3441 goto out;
4102 bh->next = bh + 1; 3442 bh->next = bh + 1;
4103 } 3443 }
4104 fsg->buffhds[NUM_BUFFERS - 1].next = &fsg->buffhds[0]; 3444 fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
4105 3445
4106 /* This should reflect the actual gadget power source */ 3446 /* This should reflect the actual gadget power source */
4107 usb_gadget_set_selfpowered(gadget); 3447 usb_gadget_set_selfpowered(gadget);
4108 3448
4109 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s", 3449 snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
3450 "%s %s with %s",
4110 init_utsname()->sysname, init_utsname()->release, 3451 init_utsname()->sysname, init_utsname()->release,
4111 gadget->name); 3452 gadget->name);
4112 3453
4113 /* On a real device, serial[] would be loaded from permanent 3454 /* On a real device, serial[] would be loaded from permanent
4114 * storage. We just encode it from the driver version string. */ 3455 * storage. We just encode it from the driver version string. */
4115 for (i = 0; i < sizeof(serial) - 2; i += 2) { 3456 for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
4116 unsigned char c = DRIVER_VERSION[i / 2]; 3457 unsigned char c = DRIVER_VERSION[i / 2];
4117 3458
4118 if (!c) 3459 if (!c)
4119 break; 3460 break;
4120 sprintf(&serial[i], "%02X", c); 3461 sprintf(&fsg_string_serial[i], "%02X", c);
4121 } 3462 }
4122 3463
4123 fsg->thread_task = kthread_create(fsg_main_thread, fsg, 3464 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
@@ -4133,7 +3474,7 @@ static int __init fsg_bind(struct usb_gadget *gadget)
4133 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 3474 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
4134 for (i = 0; i < fsg->nluns; ++i) { 3475 for (i = 0; i < fsg->nluns; ++i) {
4135 curlun = &fsg->luns[i]; 3476 curlun = &fsg->luns[i];
4136 if (backing_file_is_open(curlun)) { 3477 if (fsg_lun_is_open(curlun)) {
4137 p = NULL; 3478 p = NULL;
4138 if (pathbuf) { 3479 if (pathbuf) {
4139 p = d_path(&curlun->filp->f_path, 3480 p = d_path(&curlun->filp->f_path,
@@ -4203,7 +3544,7 @@ static struct usb_gadget_driver fsg_driver = {
4203#else 3544#else
4204 .speed = USB_SPEED_FULL, 3545 .speed = USB_SPEED_FULL,
4205#endif 3546#endif
4206 .function = (char *) longname, 3547 .function = (char *) fsg_string_product,
4207 .bind = fsg_bind, 3548 .bind = fsg_bind,
4208 .unbind = fsg_unbind, 3549 .unbind = fsg_unbind,
4209 .disconnect = fsg_disconnect, 3550 .disconnect = fsg_disconnect,
@@ -4212,7 +3553,7 @@ static struct usb_gadget_driver fsg_driver = {
4212 .resume = fsg_resume, 3553 .resume = fsg_resume,
4213 3554
4214 .driver = { 3555 .driver = {
4215 .name = (char *) shortname, 3556 .name = DRIVER_NAME,
4216 .owner = THIS_MODULE, 3557 .owner = THIS_MODULE,
4217 // .release = ... 3558 // .release = ...
4218 // .suspend = ... 3559 // .suspend = ...
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
new file mode 100644
index 000000000000..19619fbf20ac
--- /dev/null
+++ b/drivers/usb/gadget/mass_storage.c
@@ -0,0 +1,240 @@
1/*
2 * mass_storage.c -- Mass Storage USB Gadget
3 *
4 * Copyright (C) 2003-2008 Alan Stern
5 * Copyright (C) 2009 Samsung Electronics
6 * Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
7 * All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24
25/*
26 * The Mass Storage Gadget acts as a USB Mass Storage device,
27 * appearing to the host as a disk drive or as a CD-ROM drive. In
28 * addition to providing an example of a genuinely useful gadget
29 * driver for a USB device, it also illustrates a technique of
30 * double-buffering for increased throughput. Last but not least, it
31 * gives an easy way to probe the behavior of the Mass Storage drivers
32 * in a USB host.
33 *
34 * This file serves only administrative purposes; all the
35 * business logic is implemented in f_mass_storage.c. Read the
36 * comments in that file for a more detailed description.
37 */
38
39
40#include <linux/kernel.h>
41#include <linux/utsname.h>
42#include <linux/usb/ch9.h>
43
44
45/*-------------------------------------------------------------------------*/
46
47#define DRIVER_DESC "Mass Storage Gadget"
48#define DRIVER_VERSION "2009/09/11"
49
50/*-------------------------------------------------------------------------*/
51
52/*
53 * kbuild is not very cooperative with respect to linking separately
54 * compiled library objects into one module. So for now we won't use
55 * separate compilation ... ensuring init/exit sections work to shrink
56 * the runtime footprint, and giving us at least some parts of what
57 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
58 */
59
60#include "composite.c"
61#include "usbstring.c"
62#include "config.c"
63#include "epautoconf.c"
64#include "f_mass_storage.c"
65
66/*-------------------------------------------------------------------------*/
67
68static struct usb_device_descriptor msg_device_desc = {
69 .bLength = sizeof msg_device_desc,
70 .bDescriptorType = USB_DT_DEVICE,
71
72 .bcdUSB = cpu_to_le16(0x0200),
73 .bDeviceClass = USB_CLASS_PER_INTERFACE,
74
75 /* Vendor and product id can be overridden by module parameters. */
76 .idVendor = cpu_to_le16(FSG_VENDOR_ID),
77 .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
78 /* .bcdDevice = f(hardware) */
79 /* .iManufacturer = DYNAMIC */
80 /* .iProduct = DYNAMIC */
81 /* NO SERIAL NUMBER */
82 .bNumConfigurations = 1,
83};
84
85static struct usb_otg_descriptor otg_descriptor = {
86 .bLength = sizeof otg_descriptor,
87 .bDescriptorType = USB_DT_OTG,
88
89 /* REVISIT SRP-only hardware is possible, although
90 * it would not be called "OTG" ...
91 */
92 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
93};
94
95static const struct usb_descriptor_header *otg_desc[] = {
96 (struct usb_descriptor_header *) &otg_descriptor,
97 NULL,
98};
99
100
101/* string IDs are assigned dynamically */
102
103#define STRING_MANUFACTURER_IDX 0
104#define STRING_PRODUCT_IDX 1
105#define STRING_CONFIGURATION_IDX 2
106
107static char manufacturer[50];
108
109static struct usb_string strings_dev[] = {
110 [STRING_MANUFACTURER_IDX].s = manufacturer,
111 [STRING_PRODUCT_IDX].s = DRIVER_DESC,
112 [STRING_CONFIGURATION_IDX].s = "Self Powered",
113 { } /* end of list */
114};
115
116static struct usb_gadget_strings stringtab_dev = {
117 .language = 0x0409, /* en-us */
118 .strings = strings_dev,
119};
120
121static struct usb_gadget_strings *dev_strings[] = {
122 &stringtab_dev,
123 NULL,
124};
125
126
127
128/****************************** Configurations ******************************/
129
130static struct fsg_module_parameters mod_data = {
131 .stall = 1
132};
133FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
134
135static unsigned long msg_registered = 0;
136static void msg_cleanup(void);
137
138static int __init msg_do_config(struct usb_configuration *c)
139{
140 struct fsg_common *common;
141 struct fsg_config config;
142 int ret;
143
144 if (gadget_is_otg(c->cdev->gadget)) {
145 c->descriptors = otg_desc;
146 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
147 }
148
149 fsg_config_from_params(&config, &mod_data);
150 config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup;
151 common = fsg_common_init(0, c->cdev, &config);
152 if (IS_ERR(common))
153 return PTR_ERR(common);
154
155 ret = fsg_add(c->cdev, c, common);
156 fsg_common_put(common);
157 return ret;
158}
159
160static struct usb_configuration msg_config_driver = {
161 .label = "Linux File-Backed Storage",
162 .bind = msg_do_config,
163 .bConfigurationValue = 1,
164 /* .iConfiguration = DYNAMIC */
165 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
166};
167
168
169
170/****************************** Gadget Bind ******************************/
171
172
173static int __init msg_bind(struct usb_composite_dev *cdev)
174{
175 struct usb_gadget *gadget = cdev->gadget;
176 int status;
177
178 /* Allocate string descriptor numbers ... note that string
179 * contents can be overridden by the composite_dev glue.
180 */
181
182 /* device descriptor strings: manufacturer, product */
183 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
184 init_utsname()->sysname, init_utsname()->release,
185 gadget->name);
186 status = usb_string_id(cdev);
187 if (status < 0)
188 return status;
189 strings_dev[STRING_MANUFACTURER_IDX].id = status;
190 msg_device_desc.iManufacturer = status;
191
192 status = usb_string_id(cdev);
193 if (status < 0)
194 return status;
195 strings_dev[STRING_PRODUCT_IDX].id = status;
196 msg_device_desc.iProduct = status;
197
198 status = usb_string_id(cdev);
199 if (status < 0)
200 return status;
201 strings_dev[STRING_CONFIGURATION_IDX].id = status;
202 msg_config_driver.iConfiguration = status;
203
204	/* register our configuration */
205 status = usb_add_config(cdev, &msg_config_driver);
206 if (status < 0)
207 return status;
208
209 dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
210 set_bit(0, &msg_registered);
211 return 0;
212}
213
214
215/****************************** Some noise ******************************/
216
217
218static struct usb_composite_driver msg_driver = {
219 .name = "g_mass_storage",
220 .dev = &msg_device_desc,
221 .strings = dev_strings,
222 .bind = msg_bind,
223};
224
225MODULE_DESCRIPTION(DRIVER_DESC);
226MODULE_AUTHOR("Michal Nazarewicz");
227MODULE_LICENSE("GPL");
228
229static int __init msg_init(void)
230{
231 return usb_composite_register(&msg_driver);
232}
233module_init(msg_init);
234
235static void msg_cleanup(void)
236{
237 if (test_and_clear_bit(0, &msg_registered))
238 usb_composite_unregister(&msg_driver);
239}
240module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
new file mode 100644
index 000000000000..429560100b10
--- /dev/null
+++ b/drivers/usb/gadget/multi.c
@@ -0,0 +1,358 @@
1/*
2 * multi.c -- Multifunction Composite driver
3 *
4 * Copyright (C) 2008 David Brownell
5 * Copyright (C) 2008 Nokia Corporation
6 * Copyright (C) 2009 Samsung Electronics
7 * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24
25#include <linux/kernel.h>
26#include <linux/utsname.h>
27
28
29#if defined USB_ETH_RNDIS
30# undef USB_ETH_RNDIS
31#endif
32#ifdef CONFIG_USB_ETH_RNDIS
33# define USB_ETH_RNDIS y
34#endif
35
36
37#define DRIVER_DESC "Multifunction Composite Gadget"
38#define DRIVER_VERSION "2009/07/21"
39
40/*-------------------------------------------------------------------------*/
41
42#define MULTI_VENDOR_NUM 0x0525 /* XXX NetChip */
43#define MULTI_PRODUCT_NUM 0xa4ab /* XXX */
44
45/*-------------------------------------------------------------------------*/
46
47/*
48 * kbuild is not very cooperative with respect to linking separately
49 * compiled library objects into one module. So for now we won't use
50 * separate compilation ... ensuring init/exit sections work to shrink
51 * the runtime footprint, and giving us at least some parts of what
52 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
53 */
54
55#include "composite.c"
56#include "usbstring.c"
57#include "config.c"
58#include "epautoconf.c"
59
60#include "u_serial.c"
61#include "f_acm.c"
62
63#include "f_ecm.c"
64#include "f_subset.c"
65#ifdef USB_ETH_RNDIS
66# include "f_rndis.c"
67# include "rndis.c"
68#endif
69#include "u_ether.c"
70
71#undef DBG /* u_ether.c has a broken idea about macros */
72#undef VDBG /* so clean up after it */
73#undef ERROR
74#undef INFO
75#include "f_mass_storage.c"
76
77/*-------------------------------------------------------------------------*/
78
79static struct usb_device_descriptor device_desc = {
80 .bLength = sizeof device_desc,
81 .bDescriptorType = USB_DT_DEVICE,
82
83 .bcdUSB = cpu_to_le16(0x0200),
84
85 /* .bDeviceClass = USB_CLASS_COMM, */
86 /* .bDeviceSubClass = 0, */
87 /* .bDeviceProtocol = 0, */
88 .bDeviceClass = 0xEF, /* Miscellaneous Device class */
89 .bDeviceSubClass = 2, /* Common Class */
90 .bDeviceProtocol = 1, /* Interface Association Descriptor */
91 /* .bMaxPacketSize0 = f(hardware) */
92
93 /* Vendor and product id can be overridden by module parameters. */
94 .idVendor = cpu_to_le16(MULTI_VENDOR_NUM),
95 .idProduct = cpu_to_le16(MULTI_PRODUCT_NUM),
96 /* .bcdDevice = f(hardware) */
97 /* .iManufacturer = DYNAMIC */
98 /* .iProduct = DYNAMIC */
99 /* NO SERIAL NUMBER */
100 .bNumConfigurations = 1,
101};
102
103static struct usb_otg_descriptor otg_descriptor = {
104 .bLength = sizeof otg_descriptor,
105 .bDescriptorType = USB_DT_OTG,
106
107 /* REVISIT SRP-only hardware is possible, although
108 * it would not be called "OTG" ...
109 */
110 .bmAttributes = USB_OTG_SRP | USB_OTG_HNP,
111};
112
113static const struct usb_descriptor_header *otg_desc[] = {
114 (struct usb_descriptor_header *) &otg_descriptor,
115 NULL,
116};
117
118
119/* string IDs are assigned dynamically */
120
121#define STRING_MANUFACTURER_IDX 0
122#define STRING_PRODUCT_IDX 1
123
124static char manufacturer[50];
125
126static struct usb_string strings_dev[] = {
127 [STRING_MANUFACTURER_IDX].s = manufacturer,
128 [STRING_PRODUCT_IDX].s = DRIVER_DESC,
129 { } /* end of list */
130};
131
132static struct usb_gadget_strings stringtab_dev = {
133 .language = 0x0409, /* en-us */
134 .strings = strings_dev,
135};
136
137static struct usb_gadget_strings *dev_strings[] = {
138 &stringtab_dev,
139 NULL,
140};
141
142static u8 hostaddr[ETH_ALEN];
143
144
145
146/****************************** Configurations ******************************/
147
148static struct fsg_module_parameters mod_data = {
149 .stall = 1
150};
151FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
152
153static struct fsg_common *fsg_common;
154
155
156#ifdef USB_ETH_RNDIS
157
158static int __init rndis_do_config(struct usb_configuration *c)
159{
160 int ret;
161
162 if (gadget_is_otg(c->cdev->gadget)) {
163 c->descriptors = otg_desc;
164 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
165 }
166
167 ret = rndis_bind_config(c, hostaddr);
168 if (ret < 0)
169 return ret;
170
171 ret = acm_bind_config(c, 0);
172 if (ret < 0)
173 return ret;
174
175 ret = fsg_add(c->cdev, c, fsg_common);
176 if (ret < 0)
177 return ret;
178
179 return 0;
180}
181
182static struct usb_configuration rndis_config_driver = {
183 .label = "Multifunction Composite (RNDIS + MS + ACM)",
184 .bind = rndis_do_config,
185 .bConfigurationValue = 2,
186 /* .iConfiguration = DYNAMIC */
187 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
188};
189
190#endif
191
192#ifdef CONFIG_USB_G_MULTI_CDC
193
194static int __init cdc_do_config(struct usb_configuration *c)
195{
196 int ret;
197
198 if (gadget_is_otg(c->cdev->gadget)) {
199 c->descriptors = otg_desc;
200 c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
201 }
202
203 ret = ecm_bind_config(c, hostaddr);
204 if (ret < 0)
205 return ret;
206
207 ret = acm_bind_config(c, 0);
208 if (ret < 0)
209 return ret;
210
211 ret = fsg_add(c->cdev, c, fsg_common);
212 if (ret < 0)
213 return ret;
216
217 return 0;
218}
219
220static struct usb_configuration cdc_config_driver = {
221 .label = "Multifunction Composite (CDC + MS + ACM)",
222 .bind = cdc_do_config,
223 .bConfigurationValue = 1,
224 /* .iConfiguration = DYNAMIC */
225 .bmAttributes = USB_CONFIG_ATT_SELFPOWER,
226};
227
228#endif
229
230
231
232/****************************** Gadget Bind ******************************/
233
234
235static int __init multi_bind(struct usb_composite_dev *cdev)
236{
237 struct usb_gadget *gadget = cdev->gadget;
238 int status, gcnum;
239
240 if (!can_support_ecm(cdev->gadget)) {
241 dev_err(&gadget->dev, "controller '%s' not usable\n",
242 gadget->name);
243 return -EINVAL;
244 }
245
246 /* set up network link layer */
247 status = gether_setup(cdev->gadget, hostaddr);
248 if (status < 0)
249 return status;
250
251 /* set up serial link layer */
252 status = gserial_setup(cdev->gadget, 1);
253 if (status < 0)
254 goto fail0;
255
256 /* set up mass storage function */
257 fsg_common = fsg_common_from_params(0, cdev, &mod_data);
258 if (IS_ERR(fsg_common)) {
259 status = PTR_ERR(fsg_common);
260 goto fail1;
261 }
262
263
264 gcnum = usb_gadget_controller_number(gadget);
265 if (gcnum >= 0)
266 device_desc.bcdDevice = cpu_to_le16(0x0300 | gcnum);
267 else {
268 /* We assume that can_support_ecm() tells the truth;
269 * but if the controller isn't recognized at all then
270 * that assumption is a bit more likely to be wrong.
271 */
272 WARNING(cdev, "controller '%s' not recognized\n",
273 gadget->name);
274 device_desc.bcdDevice = cpu_to_le16(0x0300 | 0x0099);
275 }
276
277
278 /* Allocate string descriptor numbers ... note that string
279 * contents can be overridden by the composite_dev glue.
280 */
281
282 /* device descriptor strings: manufacturer, product */
283 snprintf(manufacturer, sizeof manufacturer, "%s %s with %s",
284 init_utsname()->sysname, init_utsname()->release,
285 gadget->name);
286 status = usb_string_id(cdev);
287 if (status < 0)
288 goto fail2;
289 strings_dev[STRING_MANUFACTURER_IDX].id = status;
290 device_desc.iManufacturer = status;
291
292 status = usb_string_id(cdev);
293 if (status < 0)
294 goto fail2;
295 strings_dev[STRING_PRODUCT_IDX].id = status;
296 device_desc.iProduct = status;
297
298#ifdef USB_ETH_RNDIS
299 /* register our first configuration */
300 status = usb_add_config(cdev, &rndis_config_driver);
301 if (status < 0)
302 goto fail2;
303#endif
304
305#ifdef CONFIG_USB_G_MULTI_CDC
306 /* register our second configuration */
307 status = usb_add_config(cdev, &cdc_config_driver);
308 if (status < 0)
309 goto fail2;
310#endif
311
312 dev_info(&gadget->dev, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
313 fsg_common_put(fsg_common);
314 return 0;
315
316fail2:
317 fsg_common_put(fsg_common);
318fail1:
319 gserial_cleanup();
320fail0:
321 gether_cleanup();
322 return status;
323}
324
325static int __exit multi_unbind(struct usb_composite_dev *cdev)
326{
327 gserial_cleanup();
328 gether_cleanup();
329 return 0;
330}
331
332
333/****************************** Some noise ******************************/
334
335
336static struct usb_composite_driver multi_driver = {
337 .name = "g_multi",
338 .dev = &device_desc,
339 .strings = dev_strings,
340 .bind = multi_bind,
341 .unbind = __exit_p(multi_unbind),
342};
343
344MODULE_DESCRIPTION(DRIVER_DESC);
345MODULE_AUTHOR("Michal Nazarewicz");
346MODULE_LICENSE("GPL");
347
348static int __init g_multi_init(void)
349{
350 return usb_composite_register(&multi_driver);
351}
352module_init(g_multi_init);
353
354static void __exit g_multi_cleanup(void)
355{
356 usb_composite_unregister(&multi_driver);
357}
358module_exit(g_multi_cleanup);
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
new file mode 100644
index 000000000000..868d8ee86756
--- /dev/null
+++ b/drivers/usb/gadget/storage_common.c
@@ -0,0 +1,778 @@
1/*
2 * storage_common.c -- Common definitions for mass storage functionality
3 *
4 * Copyright (C) 2003-2008 Alan Stern
5 * Copyright (C) 2009 Samsung Electronics
6 * Author: Michal Nazarewicz (m.nazarewicz@samsung.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23
24/*
25 * This file requires the following identifiers used in USB strings to
26 * be defined (each of type pointer to char):
27 * - fsg_string_manufacturer -- name of the manufacturer
28 * - fsg_string_product -- name of the product
29 * - fsg_string_serial -- product's serial
30 * - fsg_string_config -- name of the configuration
31 * - fsg_string_interface -- name of the interface
32 * The first four are only needed unless the FSG_NO_DEVICE_STRINGS
33 * macro is defined prior to including this file.
34 */
35
36/*
37 * When FSG_NO_INTR_EP is defined, the fsg_fs_intr_in_desc and
38 * fsg_hs_intr_in_desc objects as well as the
39 * FSG_FS_FUNCTION_PRE_EP_ENTRIES and FSG_HS_FUNCTION_PRE_EP_ENTRIES
40 * macros are not defined.
41 *
42 * When FSG_NO_DEVICE_STRINGS is defined, FSG_STRING_MANUFACTURER,
43 * FSG_STRING_PRODUCT, FSG_STRING_SERIAL and FSG_STRING_CONFIG are not
44 * defined (and the corresponding entries in the string tables are
45 * missing) and FSG_STRING_INTERFACE has the value zero.
46 *
47 * When FSG_NO_OTG is defined, fsg_otg_desc won't be defined.
48 */
49
50/*
51 * When FSG_BUFFHD_STATIC_BUFFER is defined at the time this file is
52 * included, the fsg_buffhd structure's buf field will be an array of
53 * FSG_BUFLEN characters rather than a pointer to void.
54 */
55
56
57#include <asm/unaligned.h>
58
59
60/* Thanks to NetChip Technologies for donating this product ID.
61 *
62 * DO NOT REUSE THESE IDs with any other driver!! Ever!!
63 * Instead: allocate your own, using normal USB-IF procedures. */
64#define FSG_VENDOR_ID 0x0525 /* NetChip */
65#define FSG_PRODUCT_ID 0xa4a5 /* Linux-USB File-backed Storage Gadget */
66
67
68/*-------------------------------------------------------------------------*/
69
70
71#ifndef DEBUG
72#undef VERBOSE_DEBUG
73#undef DUMP_MSGS
74#endif /* !DEBUG */
75
76#ifdef VERBOSE_DEBUG
77#define VLDBG LDBG
78#else
79#define VLDBG(lun, fmt, args...) do { } while (0)
80#endif /* VERBOSE_DEBUG */
81
82#define LDBG(lun, fmt, args...) dev_dbg (&(lun)->dev, fmt, ## args)
83#define LERROR(lun, fmt, args...) dev_err (&(lun)->dev, fmt, ## args)
84#define LWARN(lun, fmt, args...) dev_warn(&(lun)->dev, fmt, ## args)
85#define LINFO(lun, fmt, args...) dev_info(&(lun)->dev, fmt, ## args)
86
87/* Keep these macros in sync with those in
88 * include/linux/usb/composite.h or else GCC will complain. If they
89 * are identical (same argument names, white space in the same
90 * places) GCC allows the redefinition; otherwise (even if only some
91 * white space is removed or added) a warning is issued. We do not
92 * check whether these symbols are already defined, because a warning
93 * is desired when someone else has defined them to mean something
94 * else. */
95#define DBG(d, fmt, args...) dev_dbg(&(d)->gadget->dev , fmt , ## args)
96#define VDBG(d, fmt, args...) dev_vdbg(&(d)->gadget->dev , fmt , ## args)
97#define ERROR(d, fmt, args...) dev_err(&(d)->gadget->dev , fmt , ## args)
98#define WARNING(d, fmt, args...) dev_warn(&(d)->gadget->dev , fmt , ## args)
99#define INFO(d, fmt, args...) dev_info(&(d)->gadget->dev , fmt , ## args)
100
101
102
103#ifdef DUMP_MSGS
104
105# define dump_msg(fsg, /* const char * */ label, \
106 /* const u8 * */ buf, /* unsigned */ length) do { \
107 if (length < 512) { \
108 DBG(fsg, "%s, length %u:\n", label, length); \
109 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, \
110 16, 1, buf, length, 0); \
111 } \
112} while (0)
113
114# define dump_cdb(fsg) do { } while (0)
115
116#else
117
118# define dump_msg(fsg, /* const char * */ label, \
119 /* const u8 * */ buf, /* unsigned */ length) do { } while (0)
120
121# ifdef VERBOSE_DEBUG
122
123# define dump_cdb(fsg) \
124 print_hex_dump(KERN_DEBUG, "SCSI CDB: ", DUMP_PREFIX_NONE, \
125 16, 1, (fsg)->cmnd, (fsg)->cmnd_size, 0) \
126
127# else
128
129# define dump_cdb(fsg) do { } while (0)
130
131# endif /* VERBOSE_DEBUG */
132
133#endif /* DUMP_MSGS */
134
135
136
137
138
139/*-------------------------------------------------------------------------*/
140
141/* SCSI device types */
142#define TYPE_DISK 0x00
143#define TYPE_CDROM 0x05
144
145/* USB protocol value = the transport method */
146#define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */
147#define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */
148#define USB_PR_BULK 0x50 /* Bulk-only */
149
150/* USB subclass value = the protocol encapsulation */
151#define USB_SC_RBC 0x01 /* Reduced Block Commands (flash) */
152#define USB_SC_8020 0x02 /* SFF-8020i, MMC-2, ATAPI (CD-ROM) */
153#define USB_SC_QIC 0x03 /* QIC-157 (tape) */
154#define USB_SC_UFI 0x04 /* UFI (floppy) */
155#define USB_SC_8070 0x05 /* SFF-8070i (removable) */
156#define USB_SC_SCSI 0x06 /* Transparent SCSI */
157
158/* Bulk-only data structures */
159
160/* Command Block Wrapper */
161struct fsg_bulk_cb_wrap {
162 __le32 Signature; /* Contains 'USBC' */
163 u32 Tag; /* Unique per command id */
164 __le32 DataTransferLength; /* Size of the data */
165 u8 Flags; /* Direction in bit 7 */
166 u8 Lun; /* LUN (normally 0) */
167 u8 Length; /* Of the CDB, <= MAX_COMMAND_SIZE */
168 u8 CDB[16]; /* Command Data Block */
169};
170
171#define USB_BULK_CB_WRAP_LEN 31
172#define USB_BULK_CB_SIG 0x43425355 /* Spells out USBC */
173#define USB_BULK_IN_FLAG 0x80
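/* A minimal sketch, not part of this patch, of how a bulk-only
 * transport typically validates a freshly received CBW against the
 * definitions above. The helper name and the nread parameter are
 * illustrative assumptions, not part of this driver. */
static int example_check_cbw(const struct fsg_bulk_cb_wrap *cbw,
			     unsigned int nread)
{
	/* A valid CBW is exactly 31 bytes and carries 'USBC' */
	if (nread != USB_BULK_CB_WRAP_LEN ||
	    le32_to_cpu(cbw->Signature) != USB_BULK_CB_SIG)
		return -EINVAL;

	/* The CDB length must fit in the 16-byte CDB field */
	if (cbw->Length < 6 || cbw->Length > sizeof(cbw->CDB))
		return -EINVAL;

	return 0;
}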
174
175/* Command Status Wrapper */
176struct bulk_cs_wrap {
177 __le32 Signature; /* Should = 'USBS' */
178 u32 Tag; /* Same as original command */
179 __le32 Residue; /* Amount not transferred */
180 u8 Status; /* See below */
181};
182
183#define USB_BULK_CS_WRAP_LEN 13
184#define USB_BULK_CS_SIG 0x53425355 /* Spells out 'USBS' */
185#define USB_STATUS_PASS 0
186#define USB_STATUS_FAIL 1
187#define USB_STATUS_PHASE_ERROR 2
188
189/* Bulk-only class specific requests */
190#define USB_BULK_RESET_REQUEST 0xff
191#define USB_BULK_GET_MAX_LUN_REQUEST 0xfe
192
193
194/* CBI Interrupt data structure */
195struct interrupt_data {
196 u8 bType;
197 u8 bValue;
198};
199
200#define CBI_INTERRUPT_DATA_LEN 2
201
202/* CBI Accept Device-Specific Command request */
203#define USB_CBI_ADSC_REQUEST 0x00
204
205
206/* Length of a SCSI Command Data Block */
207#define MAX_COMMAND_SIZE 16
208
209/* SCSI commands that we recognize */
210#define SC_FORMAT_UNIT 0x04
211#define SC_INQUIRY 0x12
212#define SC_MODE_SELECT_6 0x15
213#define SC_MODE_SELECT_10 0x55
214#define SC_MODE_SENSE_6 0x1a
215#define SC_MODE_SENSE_10 0x5a
216#define SC_PREVENT_ALLOW_MEDIUM_REMOVAL 0x1e
217#define SC_READ_6 0x08
218#define SC_READ_10 0x28
219#define SC_READ_12 0xa8
220#define SC_READ_CAPACITY 0x25
221#define SC_READ_FORMAT_CAPACITIES 0x23
222#define SC_READ_HEADER 0x44
223#define SC_READ_TOC 0x43
224#define SC_RELEASE 0x17
225#define SC_REQUEST_SENSE 0x03
226#define SC_RESERVE 0x16
227#define SC_SEND_DIAGNOSTIC 0x1d
228#define SC_START_STOP_UNIT 0x1b
229#define SC_SYNCHRONIZE_CACHE 0x35
230#define SC_TEST_UNIT_READY 0x00
231#define SC_VERIFY 0x2f
232#define SC_WRITE_6 0x0a
233#define SC_WRITE_10 0x2a
234#define SC_WRITE_12 0xaa
235
236/* SCSI Sense Key/Additional Sense Code/ASC Qualifier values */
237#define SS_NO_SENSE 0
238#define SS_COMMUNICATION_FAILURE 0x040800
239#define SS_INVALID_COMMAND 0x052000
240#define SS_INVALID_FIELD_IN_CDB 0x052400
241#define SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE 0x052100
242#define SS_LOGICAL_UNIT_NOT_SUPPORTED 0x052500
243#define SS_MEDIUM_NOT_PRESENT 0x023a00
244#define SS_MEDIUM_REMOVAL_PREVENTED 0x055302
245#define SS_NOT_READY_TO_READY_TRANSITION 0x062800
246#define SS_RESET_OCCURRED 0x062900
247#define SS_SAVING_PARAMETERS_NOT_SUPPORTED 0x053900
248#define SS_UNRECOVERED_READ_ERROR 0x031100
249#define SS_WRITE_ERROR 0x030c02
250#define SS_WRITE_PROTECTED 0x072700
251
252#define SK(x) ((u8) ((x) >> 16)) /* Sense Key byte, etc. */
253#define ASC(x) ((u8) ((x) >> 8))
254#define ASCQ(x) ((u8) (x))
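/* Worked example (illustrative only): the packed sense value
 * SS_MEDIUM_NOT_PRESENT (0x023a00) splits into the three SCSI sense
 * fields via the accessors above. */
static void example_decode_sense(void)
{
	u32 sd = SS_MEDIUM_NOT_PRESENT;

	u8 key  = SK(sd);	/* 0x02: NOT READY */
	u8 asc  = ASC(sd);	/* 0x3a: medium not present */
	u8 ascq = ASCQ(sd);	/* 0x00: no qualifier */

	(void)key; (void)asc; (void)ascq;
}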
255
256
257/*-------------------------------------------------------------------------*/
258
259
260struct fsg_lun {
261 struct file *filp;
262 loff_t file_length;
263 loff_t num_sectors;
264
265 unsigned int initially_ro:1;
266 unsigned int ro:1;
267 unsigned int removable:1;
268 unsigned int cdrom:1;
269 unsigned int prevent_medium_removal:1;
270 unsigned int registered:1;
271 unsigned int info_valid:1;
272
273 u32 sense_data;
274 u32 sense_data_info;
275 u32 unit_attention_data;
276
277 struct device dev;
278};
279
280#define fsg_lun_is_open(curlun) ((curlun)->filp != NULL)
281
282static struct fsg_lun *fsg_lun_from_dev(struct device *dev)
283{
284 return container_of(dev, struct fsg_lun, dev);
285}
286
287
288/* Big enough to hold our biggest descriptor */
289#define EP0_BUFSIZE 256
290#define DELAYED_STATUS (EP0_BUFSIZE + 999) /* An impossibly large value */
291
292/* Number of buffers we will use. 2 is enough for double-buffering */
293#define FSG_NUM_BUFFERS 2
294
295/* Default buffer length. */
296#define FSG_BUFLEN ((u32)16384)
297
298/* Maximum number of LUNs supported by the mass storage function */
299#define FSG_MAX_LUNS 8
300
301enum fsg_buffer_state {
302 BUF_STATE_EMPTY = 0,
303 BUF_STATE_FULL,
304 BUF_STATE_BUSY
305};
306
307struct fsg_buffhd {
308#ifdef FSG_BUFFHD_STATIC_BUFFER
309 char buf[FSG_BUFLEN];
310#else
311 void *buf;
312#endif
313 enum fsg_buffer_state state;
314 struct fsg_buffhd *next;
315
316 /* The NetChip 2280 is faster, and handles some protocol faults
317 * better, if we don't submit any short bulk-out read requests.
318 * So we will record the intended request length here. */
319 unsigned int bulk_out_intended_length;
320
321 struct usb_request *inreq;
322 int inreq_busy;
323 struct usb_request *outreq;
324 int outreq_busy;
325};
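/* A minimal sketch of the double-buffering setup: fsg_bind() (seen in
 * the diff above) links the buffer heads into a circular list so the
 * main thread can alternate between them. The helper name is a
 * hypothetical, not part of this driver. */
static void example_link_buffhds(struct fsg_buffhd *buffhds)
{
	int i;

	for (i = 0; i < FSG_NUM_BUFFERS; ++i)
		buffhds[i].next = &buffhds[(i + 1) % FSG_NUM_BUFFERS];
}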
326
327enum fsg_state {
328 /* This one isn't used anywhere */
329 FSG_STATE_COMMAND_PHASE = -10,
330 FSG_STATE_DATA_PHASE,
331 FSG_STATE_STATUS_PHASE,
332
333 FSG_STATE_IDLE = 0,
334 FSG_STATE_ABORT_BULK_OUT,
335 FSG_STATE_RESET,
336 FSG_STATE_INTERFACE_CHANGE,
337 FSG_STATE_CONFIG_CHANGE,
338 FSG_STATE_DISCONNECT,
339 FSG_STATE_EXIT,
340 FSG_STATE_TERMINATED
341};
342
343enum data_direction {
344 DATA_DIR_UNKNOWN = 0,
345 DATA_DIR_FROM_HOST,
346 DATA_DIR_TO_HOST,
347 DATA_DIR_NONE
348};
349
350
351/*-------------------------------------------------------------------------*/
352
353
354static inline u32 get_unaligned_be24(u8 *buf)
355{
356 return 0xffffff & (u32) get_unaligned_be32(buf - 1);
357}
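/* Hedged example of the accessor above: pull the 21-bit logical block
 * address out of a READ(6) CDB (bytes 1..3; the top three bits of
 * byte 1 are reserved). Note that get_unaligned_be24() reads one byte
 * *before* buf and masks it off, so buf must not be the first byte of
 * its buffer -- here &cdb[1] is safe. The helper is illustrative only. */
static u32 example_read6_lba(u8 *cdb)
{
	return get_unaligned_be24(&cdb[1]) & 0x1fffff;
}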
358
359
360/*-------------------------------------------------------------------------*/
361
362
363enum {
364#ifndef FSG_NO_DEVICE_STRINGS
365 FSG_STRING_MANUFACTURER = 1,
366 FSG_STRING_PRODUCT,
367 FSG_STRING_SERIAL,
368 FSG_STRING_CONFIG,
369#endif
370 FSG_STRING_INTERFACE
371};
372
373
374#ifndef FSG_NO_OTG
375static struct usb_otg_descriptor
376fsg_otg_desc = {
377 .bLength = sizeof fsg_otg_desc,
378 .bDescriptorType = USB_DT_OTG,
379
380 .bmAttributes = USB_OTG_SRP,
381};
382#endif
383
384/* There is only one interface. */
385
386static struct usb_interface_descriptor
387fsg_intf_desc = {
388 .bLength = sizeof fsg_intf_desc,
389 .bDescriptorType = USB_DT_INTERFACE,
390
391 .bNumEndpoints = 2, /* Adjusted during fsg_bind() */
392 .bInterfaceClass = USB_CLASS_MASS_STORAGE,
393 .bInterfaceSubClass = USB_SC_SCSI, /* Adjusted during fsg_bind() */
394 .bInterfaceProtocol = USB_PR_BULK, /* Adjusted during fsg_bind() */
395 .iInterface = FSG_STRING_INTERFACE,
396};
397
398/* Three full-speed endpoint descriptors: bulk-in, bulk-out,
399 * and interrupt-in. */
400
401static struct usb_endpoint_descriptor
402fsg_fs_bulk_in_desc = {
403 .bLength = USB_DT_ENDPOINT_SIZE,
404 .bDescriptorType = USB_DT_ENDPOINT,
405
406 .bEndpointAddress = USB_DIR_IN,
407 .bmAttributes = USB_ENDPOINT_XFER_BULK,
408 /* wMaxPacketSize set by autoconfiguration */
409};
410
411static struct usb_endpoint_descriptor
412fsg_fs_bulk_out_desc = {
413 .bLength = USB_DT_ENDPOINT_SIZE,
414 .bDescriptorType = USB_DT_ENDPOINT,
415
416 .bEndpointAddress = USB_DIR_OUT,
417 .bmAttributes = USB_ENDPOINT_XFER_BULK,
418 /* wMaxPacketSize set by autoconfiguration */
419};
420
421#ifndef FSG_NO_INTR_EP
422
423static struct usb_endpoint_descriptor
424fsg_fs_intr_in_desc = {
425 .bLength = USB_DT_ENDPOINT_SIZE,
426 .bDescriptorType = USB_DT_ENDPOINT,
427
428 .bEndpointAddress = USB_DIR_IN,
429 .bmAttributes = USB_ENDPOINT_XFER_INT,
430 .wMaxPacketSize = cpu_to_le16(2),
431 .bInterval = 32, /* frames -> 32 ms */
432};
433
434#ifndef FSG_NO_OTG
435# define FSG_FS_FUNCTION_PRE_EP_ENTRIES 2
436#else
437# define FSG_FS_FUNCTION_PRE_EP_ENTRIES 1
438#endif
439
440#endif
441
442static struct usb_descriptor_header *fsg_fs_function[] = {
443#ifndef FSG_NO_OTG
444 (struct usb_descriptor_header *) &fsg_otg_desc,
445#endif
446 (struct usb_descriptor_header *) &fsg_intf_desc,
447 (struct usb_descriptor_header *) &fsg_fs_bulk_in_desc,
448 (struct usb_descriptor_header *) &fsg_fs_bulk_out_desc,
449#ifndef FSG_NO_INTR_EP
450 (struct usb_descriptor_header *) &fsg_fs_intr_in_desc,
451#endif
452 NULL,
453};
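/* Hedged sketch of how fsg_bind() (in the diff above) uses
 * FSG_FS_FUNCTION_PRE_EP_ENTRIES: the macro counts the non-endpoint
 * headers at the front of the table, so the list can be terminated
 * right after the last endpoint actually in use (assuming the
 * interrupt endpoint is compiled in). The helper is illustrative only. */
static void example_truncate_fs_function(int n_eps)
{
	/* keep the headers plus n_eps endpoint descriptors */
	fsg_fs_function[n_eps + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
}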
454
455
456/*
457 * USB 2.0 devices need to expose both high speed and full speed
458 * descriptors, unless they only run at full speed.
459 *
460 * That means alternate endpoint descriptors (bigger packets)
461 * and a "device qualifier" ... plus more construction options
462 * for the config descriptor.
463 */
464static struct usb_endpoint_descriptor
465fsg_hs_bulk_in_desc = {
466 .bLength = USB_DT_ENDPOINT_SIZE,
467 .bDescriptorType = USB_DT_ENDPOINT,
468
469 /* bEndpointAddress copied from fs_bulk_in_desc during fsg_bind() */
470 .bmAttributes = USB_ENDPOINT_XFER_BULK,
471 .wMaxPacketSize = cpu_to_le16(512),
472};
473
474static struct usb_endpoint_descriptor
475fsg_hs_bulk_out_desc = {
476 .bLength = USB_DT_ENDPOINT_SIZE,
477 .bDescriptorType = USB_DT_ENDPOINT,
478
479 /* bEndpointAddress copied from fs_bulk_out_desc during fsg_bind() */
480 .bmAttributes = USB_ENDPOINT_XFER_BULK,
481 .wMaxPacketSize = cpu_to_le16(512),
482 .bInterval = 1, /* NAK every 1 uframe */
483};
484
485#ifndef FSG_NO_INTR_EP
486
487static struct usb_endpoint_descriptor
488fsg_hs_intr_in_desc = {
489 .bLength = USB_DT_ENDPOINT_SIZE,
490 .bDescriptorType = USB_DT_ENDPOINT,
491
492 /* bEndpointAddress copied from fs_intr_in_desc during fsg_bind() */
493 .bmAttributes = USB_ENDPOINT_XFER_INT,
494 .wMaxPacketSize = cpu_to_le16(2),
495 .bInterval = 9, /* 2**(9-1) = 256 uframes -> 32 ms */
496};
497
498#ifndef FSG_NO_OTG
499# define FSG_HS_FUNCTION_PRE_EP_ENTRIES 2
500#else
501# define FSG_HS_FUNCTION_PRE_EP_ENTRIES 1
502#endif
503
504#endif
505
506static struct usb_descriptor_header *fsg_hs_function[] = {
507#ifndef FSG_NO_OTG
508 (struct usb_descriptor_header *) &fsg_otg_desc,
509#endif
510 (struct usb_descriptor_header *) &fsg_intf_desc,
511 (struct usb_descriptor_header *) &fsg_hs_bulk_in_desc,
512 (struct usb_descriptor_header *) &fsg_hs_bulk_out_desc,
513#ifndef FSG_NO_INTR_EP
514 (struct usb_descriptor_header *) &fsg_hs_intr_in_desc,
515#endif
516 NULL,
517};
518
519/* Maxpacket and other transfer characteristics vary by speed. */
520static struct usb_endpoint_descriptor *
521fsg_ep_desc(struct usb_gadget *g, struct usb_endpoint_descriptor *fs,
522 struct usb_endpoint_descriptor *hs)
523{
524 if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
525 return hs;
526 return fs;
527}
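/* Hedged usage sketch: when the host selects the interface, the driver
 * enables each endpoint with the descriptor matching the negotiated
 * speed (cf. the fsg_ep_desc() calls in the diff above). The helper
 * name is hypothetical; usb_ep_enable() is the two-argument gadget API
 * of this kernel era. */
static int example_enable_bulk_in(struct usb_gadget *g, struct usb_ep *ep)
{
	struct usb_endpoint_descriptor *d =
		fsg_ep_desc(g, &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);

	return usb_ep_enable(ep, d);
}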
528
529
530/* Static strings, in UTF-8 (for simplicity we use only ASCII characters) */
531static struct usb_string fsg_strings[] = {
532#ifndef FSG_NO_DEVICE_STRINGS
533 {FSG_STRING_MANUFACTURER, fsg_string_manufacturer},
534 {FSG_STRING_PRODUCT, fsg_string_product},
535 {FSG_STRING_SERIAL, fsg_string_serial},
536 {FSG_STRING_CONFIG, fsg_string_config},
537#endif
538 {FSG_STRING_INTERFACE, fsg_string_interface},
539 {}
540};
541
542static struct usb_gadget_strings fsg_stringtab = {
543 .language = 0x0409, /* en-us */
544 .strings = fsg_strings,
545};
546
547
548/*-------------------------------------------------------------------------*/
549
550/* If the next two routines are called while the gadget is registered,
551 * the caller must own fsg->filesem for writing. */
552
553static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
554{
555 int ro;
556 struct file *filp = NULL;
557 int rc = -EINVAL;
558 struct inode *inode = NULL;
559 loff_t size;
560 loff_t num_sectors;
561 loff_t min_sectors;
562
563 /* R/W if we can, R/O if we must */
564 ro = curlun->initially_ro;
565 if (!ro) {
566 filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
567 if (-EROFS == PTR_ERR(filp))
568 ro = 1;
569 }
570 if (ro)
571 filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
572 if (IS_ERR(filp)) {
573 LINFO(curlun, "unable to open backing file: %s\n", filename);
574 return PTR_ERR(filp);
575 }
576
577 if (!(filp->f_mode & FMODE_WRITE))
578 ro = 1;
579
580 if (filp->f_path.dentry)
581 inode = filp->f_path.dentry->d_inode;
582 if (inode && S_ISBLK(inode->i_mode)) {
583 if (bdev_read_only(inode->i_bdev))
584 ro = 1;
585 } else if (!inode || !S_ISREG(inode->i_mode)) {
586 LINFO(curlun, "invalid file type: %s\n", filename);
587 goto out;
588 }
589
590 /* If we can't read the file, it's no good.
591 * If we can't write the file, use it read-only. */
592 if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) {
593 LINFO(curlun, "file not readable: %s\n", filename);
594 goto out;
595 }
596 if (!(filp->f_op->write || filp->f_op->aio_write))
597 ro = 1;
598
599 size = i_size_read(inode->i_mapping->host);
600 if (size < 0) {
601 LINFO(curlun, "unable to find file size: %s\n", filename);
602 rc = (int) size;
603 goto out;
604 }
605 num_sectors = size >> 9; /* File size in 512-byte blocks */
606 min_sectors = 1;
607 if (curlun->cdrom) {
608 num_sectors &= ~3; /* Reduce to a multiple of 2048 */
609 min_sectors = 300*4; /* Smallest track is 300 frames */
610 if (num_sectors >= 256*60*75*4) {
611 num_sectors = (256*60*75 - 1) * 4;
612 LINFO(curlun, "file too big: %s\n", filename);
613 LINFO(curlun, "using only first %d blocks\n",
614 (int) num_sectors);
615 }
616 }
617 if (num_sectors < min_sectors) {
618 LINFO(curlun, "file too small: %s\n", filename);
619 rc = -ETOOSMALL;
620 goto out;
621 }
622
623 get_file(filp);
624 curlun->ro = ro;
625 curlun->filp = filp;
626 curlun->file_length = size;
627 curlun->num_sectors = num_sectors;
628 LDBG(curlun, "open backing file: %s\n", filename);
629 rc = 0;
630
631out:
632 filp_close(filp, current->files);
633 return rc;
634}
635
636
637static void fsg_lun_close(struct fsg_lun *curlun)
638{
639 if (curlun->filp) {
640 LDBG(curlun, "close backing file\n");
641 fput(curlun->filp);
642 curlun->filp = NULL;
643 }
644}
645
646
647/*-------------------------------------------------------------------------*/
648
649/* Sync the file data, don't bother with the metadata.
650 * This code was copied from fs/buffer.c:sys_fdatasync(). */
651static int fsg_lun_fsync_sub(struct fsg_lun *curlun)
652{
653 struct file *filp = curlun->filp;
654
655 if (curlun->ro || !filp)
656 return 0;
657 return vfs_fsync(filp, filp->f_path.dentry, 1);
658}
659
660static void store_cdrom_address(u8 *dest, int msf, u32 addr)
661{
662 if (msf) {
663 /* Convert to Minutes-Seconds-Frames */
664 addr >>= 2; /* Convert to 2048-byte frames */
665 addr += 2*75; /* Lead-in occupies 2 seconds */
666 dest[3] = addr % 75; /* Frames */
667 addr /= 75;
668 dest[2] = addr % 60; /* Seconds */
669 addr /= 60;
670 dest[1] = addr; /* Minutes */
671 dest[0] = 0; /* Reserved */
672 } else {
673 /* Absolute sector */
674 put_unaligned_be32(addr, dest);
675 }
676}
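
A worked example of the MSF conversion above, assuming an input address of
4000 512-byte sectors:

/* 4000 >> 2 = 1000 2048-byte frames; 1000 + 2*75 = 1150 with lead-in.
 * 1150 % 75 = 25 frames, 1150 / 75 = 15;
 * 15 % 60 = 15 seconds, 15 / 60 = 0 minutes.
 * Result: dest[] = { 0, 0, 15, 25 }, i.e. MSF 00:15:25.
 */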
677
678
679/*-------------------------------------------------------------------------*/
680
681
682static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
683 char *buf)
684{
685 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
686
687 return sprintf(buf, "%d\n", fsg_lun_is_open(curlun)
688 ? curlun->ro
689 : curlun->initially_ro);
690}
691
692static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
693 char *buf)
694{
695 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
696 struct rw_semaphore *filesem = dev_get_drvdata(dev);
697 char *p;
698 ssize_t rc;
699
700 down_read(filesem);
701 if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */
702 p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1);
703 if (IS_ERR(p))
704 rc = PTR_ERR(p);
705 else {
706 rc = strlen(p);
707 memmove(buf, p, rc);
708 buf[rc] = '\n'; /* Add a newline */
709 buf[++rc] = 0;
710 }
711 } else { /* No file, return 0 bytes */
712 *buf = 0;
713 rc = 0;
714 }
715 up_read(filesem);
716 return rc;
717}
718
719
720static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
721 const char *buf, size_t count)
722{
723 ssize_t rc = count;
724 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
725 struct rw_semaphore *filesem = dev_get_drvdata(dev);
726 int i;
727
728 if (sscanf(buf, "%d", &i) != 1)
729 return -EINVAL;
730
731 /* Allow the write-enable status to change only while the backing file
732 * is closed. */
733 down_read(filesem);
734 if (fsg_lun_is_open(curlun)) {
735 LDBG(curlun, "read-only status change prevented\n");
736 rc = -EBUSY;
737 } else {
738 curlun->ro = !!i;
739 curlun->initially_ro = !!i;
740 LDBG(curlun, "read-only status set to %d\n", curlun->ro);
741 }
742 up_read(filesem);
743 return rc;
744}
745
746static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
747 const char *buf, size_t count)
748{
749 struct fsg_lun *curlun = fsg_lun_from_dev(dev);
750 struct rw_semaphore *filesem = dev_get_drvdata(dev);
751 int rc = 0;
752
753 if (curlun->prevent_medium_removal && fsg_lun_is_open(curlun)) {
754 LDBG(curlun, "eject attempt prevented\n");
755 return -EBUSY; /* "Door is locked" */
756 }
757
758 /* Remove a trailing newline */
759 if (count > 0 && buf[count-1] == '\n')
760 ((char *) buf)[count-1] = 0; /* Ugh! */
761
762 /* Eject current medium */
763 down_write(filesem);
764 if (fsg_lun_is_open(curlun)) {
765 fsg_lun_close(curlun);
766 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
767 }
768
769 /* Load new medium */
770 if (count > 0 && buf[0]) {
771 rc = fsg_lun_open(curlun, buf);
772 if (rc == 0)
773 curlun->unit_attention_data =
774 SS_NOT_READY_TO_READY_TRANSITION;
775 }
776 up_write(filesem);
777 return (rc < 0 ? rc : count);
778}
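
For context, fsg_store_ro() and fsg_store_file() back per-lun sysfs
attributes, so userspace swaps the exported medium by writing a pathname to
the lun's "file" attribute. A hypothetical userspace sketch; the sysfs path
is illustrative and depends on the platform and UDC name, and writing an
empty line ejects the medium:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative path; look up the real one in the gadget's sysfs tree. */
	const char *attr = "/sys/devices/platform/_UDC_/gadget/gadget-lun0/file";
	const char *img = "/root/backing.img\n";
	int fd = open(attr, O_WRONLY);

	if (fd < 0 || write(fd, img, strlen(img)) < 0)
		perror("set backing file");
	if (fd >= 0)
		close(fd);
	return 0;
}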
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index 91b39ffdf6ea..fd55f450bc0e 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -112,7 +112,7 @@ int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 int eem_bind_config(struct usb_configuration *c);
 
-#ifdef CONFIG_USB_ETH_RNDIS
+#ifdef USB_ETH_RNDIS
 
 int rndis_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
 
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 9b43b226817f..2678a1624fcc 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -90,14 +90,25 @@ config USB_EHCI_TT_NEWSCHED
 
 config USB_EHCI_BIG_ENDIAN_MMIO
 	bool
-	depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX)
+	depends on USB_EHCI_HCD && (PPC_CELLEB || PPC_PS3 || 440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
 	default y
 
 config USB_EHCI_BIG_ENDIAN_DESC
 	bool
-	depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX)
+	depends on USB_EHCI_HCD && (440EPX || ARCH_IXP4XX || XPS_USB_HCD_XILINX)
 	default y
 
+config XPS_USB_HCD_XILINX
+	bool "Use Xilinx usb host EHCI controller core"
+	depends on USB_EHCI_HCD && (PPC32 || MICROBLAZE)
+	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_EHCI_BIG_ENDIAN_MMIO
+	---help---
+	  Xilinx xps USB host controller core is EHCI compliant and has
+	  a transaction translator built in. It can be configured to either
+	  support both high speed and full speed devices, or high speed
+	  devices only.
+
 config USB_EHCI_FSL
 	bool "Support for Freescale on-chip EHCI USB controller"
 	depends on USB_EHCI_HCD && FSL_SOC
@@ -105,6 +116,13 @@ config USB_EHCI_FSL
 	---help---
 	  Variation of ARC USB block used in some Freescale chips.
 
+config USB_EHCI_MXC
+	bool "Support for Freescale on-chip EHCI USB controller"
+	depends on USB_EHCI_HCD && ARCH_MXC
+	select USB_EHCI_ROOT_HUB_TT
+	---help---
+	  Variation of ARC USB block used in some Freescale chips.
+
 config USB_EHCI_HCD_PPC_OF
 	bool "EHCI support for PPC USB controller on OF platform bus"
 	depends on USB_EHCI_HCD && PPC_OF
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d8f4aaa616f2..5859522d6edd 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -549,7 +549,7 @@ static int ehci_init(struct usb_hcd *hcd)
 	/* controllers may cache some of the periodic schedule ... */
 	hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params);
 	if (HCC_ISOC_CACHE(hcc_params))		// full frame cache
-		ehci->i_thresh = 8;
+		ehci->i_thresh = 2 + 8;
 	else					// N microframes cached
 		ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);
 
@@ -605,6 +605,8 @@ static int ehci_init(struct usb_hcd *hcd)
 	}
 	ehci->command = temp;
 
+	/* Accept arbitrarily long scatter-gather lists */
+	hcd->self.sg_tablesize = ~0;
 	return 0;
 }
 
@@ -1105,11 +1107,21 @@ MODULE_LICENSE ("GPL");
 #define	PLATFORM_DRIVER		ehci_fsl_driver
 #endif
 
+#ifdef CONFIG_USB_EHCI_MXC
+#include "ehci-mxc.c"
+#define PLATFORM_DRIVER		ehci_mxc_driver
+#endif
+
 #ifdef CONFIG_SOC_AU1200
 #include "ehci-au1xxx.c"
 #define	PLATFORM_DRIVER		ehci_hcd_au1xxx_driver
 #endif
 
+#ifdef CONFIG_ARCH_OMAP34XX
+#include "ehci-omap.c"
+#define PLATFORM_DRIVER		ehci_hcd_omap_driver
+#endif
+
 #ifdef CONFIG_PPC_PS3
 #include "ehci-ps3.c"
 #define	PS3_SYSTEM_BUS_DRIVER	ps3_ehci_driver
@@ -1120,6 +1132,11 @@ MODULE_LICENSE ("GPL");
 #define	OF_PLATFORM_DRIVER	ehci_hcd_ppc_of_driver
 #endif
 
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+#include "ehci-xilinx-of.c"
+#define OF_PLATFORM_DRIVER	ehci_hcd_xilinx_of_driver
+#endif
+
 #ifdef CONFIG_PLAT_ORION
 #include "ehci-orion.c"
 #define	PLATFORM_DRIVER		ehci_orion_driver
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 1b6f1c0e5cee..2c6571c05f35 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -236,7 +236,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
 	}
 
 	if (unlikely(ehci->debug)) {
-		if (ehci->debug && !dbgp_reset_prep())
+		if (!dbgp_reset_prep())
 			ehci->debug = NULL;
 		else
 			dbgp_external_startup();
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
new file mode 100644
index 000000000000..35c56f40bdbb
--- /dev/null
+++ b/drivers/usb/host/ehci-mxc.c
@@ -0,0 +1,296 @@
1/*
2 * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
3 * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/platform_device.h>
21#include <linux/clk.h>
22#include <linux/delay.h>
23#include <linux/usb/otg.h>
24
25#include <mach/mxc_ehci.h>
26
27#define ULPI_VIEWPORT_OFFSET 0x170
28#define PORTSC_OFFSET 0x184
29#define USBMODE_OFFSET 0x1a8
30#define USBMODE_CM_HOST 3
31
32struct ehci_mxc_priv {
33 struct clk *usbclk, *ahbclk;
34 struct usb_hcd *hcd;
35};
36
37/* called during probe() after chip reset completes */
38static int ehci_mxc_setup(struct usb_hcd *hcd)
39{
40 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
41 int retval;
42
43 /* EHCI registers start at offset 0x100 */
44 ehci->caps = hcd->regs + 0x100;
45 ehci->regs = hcd->regs + 0x100 +
46 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
47 dbg_hcs_params(ehci, "reset");
48 dbg_hcc_params(ehci, "reset");
49
50 /* cache this readonly data; minimize chip reads */
51 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
52
53 retval = ehci_halt(ehci);
54 if (retval)
55 return retval;
56
57 /* data structure init */
58 retval = ehci_init(hcd);
59 if (retval)
60 return retval;
61
62 hcd->has_tt = 1;
63
64 ehci->sbrn = 0x20;
65
66 ehci_reset(ehci);
67
68 ehci_port_power(ehci, 0);
69 return 0;
70}
71
72static const struct hc_driver ehci_mxc_hc_driver = {
73 .description = hcd_name,
74 .product_desc = "Freescale On-Chip EHCI Host Controller",
75 .hcd_priv_size = sizeof(struct ehci_hcd),
76
77 /*
78 * generic hardware linkage
79 */
80 .irq = ehci_irq,
81 .flags = HCD_USB2 | HCD_MEMORY,
82
83 /*
84 * basic lifecycle operations
85 */
86 .reset = ehci_mxc_setup,
87 .start = ehci_run,
88 .stop = ehci_stop,
89 .shutdown = ehci_shutdown,
90
91 /*
92 * managing i/o requests and associated device resources
93 */
94 .urb_enqueue = ehci_urb_enqueue,
95 .urb_dequeue = ehci_urb_dequeue,
96 .endpoint_disable = ehci_endpoint_disable,
97
98 /*
99 * scheduling support
100 */
101 .get_frame_number = ehci_get_frame,
102
103 /*
104 * root hub support
105 */
106 .hub_status_data = ehci_hub_status_data,
107 .hub_control = ehci_hub_control,
108 .bus_suspend = ehci_bus_suspend,
109 .bus_resume = ehci_bus_resume,
110 .relinquish_port = ehci_relinquish_port,
111 .port_handed_over = ehci_port_handed_over,
112};
113
114static int ehci_mxc_drv_probe(struct platform_device *pdev)
115{
116 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
117 struct usb_hcd *hcd;
118 struct resource *res;
119 int irq, ret, temp;
120 struct ehci_mxc_priv *priv;
121 struct device *dev = &pdev->dev;
122
123 dev_info(&pdev->dev, "initializing i.MX USB Controller\n");
124
125 if (!pdata) {
126 dev_err(dev, "No platform data given, bailing out.\n");
127 return -EINVAL;
128 }
129
130 irq = platform_get_irq(pdev, 0);
131
132 hcd = usb_create_hcd(&ehci_mxc_hc_driver, dev, dev_name(dev));
133 if (!hcd)
134 return -ENOMEM;
135
136 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
137 if (!priv) {
138 ret = -ENOMEM;
139 goto err_alloc;
140 }
141
142 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
143 if (!res) {
144 dev_err(dev, "Found HC with no register addr. Check setup!\n");
145 ret = -ENODEV;
146 goto err_get_resource;
147 }
148
149 hcd->rsrc_start = res->start;
150 hcd->rsrc_len = resource_size(res);
151
152 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
153 dev_dbg(dev, "controller already in use\n");
154 ret = -EBUSY;
155 goto err_request_mem;
156 }
157
158 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
159 if (!hcd->regs) {
160 dev_err(dev, "error mapping memory\n");
161 ret = -EFAULT;
162 goto err_ioremap;
163 }
164
165 /* enable clocks */
166 priv->usbclk = clk_get(dev, "usb");
167 if (IS_ERR(priv->usbclk)) {
168 ret = PTR_ERR(priv->usbclk);
169 goto err_clk;
170 }
171 clk_enable(priv->usbclk);
172
173 if (!cpu_is_mx35()) {
174 priv->ahbclk = clk_get(dev, "usb_ahb");
175 if (IS_ERR(priv->ahbclk)) {
176 ret = PTR_ERR(priv->ahbclk);
177 goto err_clk_ahb;
178 }
179 clk_enable(priv->ahbclk);
180 }
181
182 /* set USBMODE to host mode */
183 temp = readl(hcd->regs + USBMODE_OFFSET);
184 writel(temp | USBMODE_CM_HOST, hcd->regs + USBMODE_OFFSET);
185
186 /* set up the PORTSCx register */
187 writel(pdata->portsc, hcd->regs + PORTSC_OFFSET);
188 mdelay(10);
189
190 /* setup USBCONTROL. */
191 ret = mxc_set_usbcontrol(pdev->id, pdata->flags);
192 if (ret < 0)
193 goto err_init;
194
195 /* call platform specific init function */
196 if (pdata->init) {
197 ret = pdata->init(pdev);
198 if (ret) {
199 dev_err(dev, "platform init failed\n");
200 goto err_init;
201 }
202 }
203
204 /* most platforms need some time to settle changed IO settings */
205 mdelay(10);
206
207 /* Initialize the transceiver */
208 if (pdata->otg) {
209 pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
210 if (otg_init(pdata->otg) != 0)
211 dev_err(dev, "unable to init transceiver\n");
212 else if (otg_set_vbus(pdata->otg, 1) != 0)
213 dev_err(dev, "unable to enable vbus on transceiver\n");
214 }
215
216 priv->hcd = hcd;
217 platform_set_drvdata(pdev, priv);
218
219 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
220 if (ret)
221 goto err_add;
222
223 return 0;
224
225err_add:
226 if (pdata && pdata->exit)
227 pdata->exit(pdev);
228err_init:
229 if (priv->ahbclk) {
230 clk_disable(priv->ahbclk);
231 clk_put(priv->ahbclk);
232 }
233err_clk_ahb:
234 clk_disable(priv->usbclk);
235 clk_put(priv->usbclk);
236err_clk:
237 iounmap(hcd->regs);
238err_ioremap:
239 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
240err_request_mem:
241err_get_resource:
242 kfree(priv);
243err_alloc:
244 usb_put_hcd(hcd);
245 return ret;
246}
247
248static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
249{
250 struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
251 struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
252 struct usb_hcd *hcd = priv->hcd;
253
254 if (pdata && pdata->exit)
255 pdata->exit(pdev);
256
257 if (pdata->otg)
258 otg_shutdown(pdata->otg);
259
260 usb_remove_hcd(hcd);
261 iounmap(hcd->regs);
262 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
263 usb_put_hcd(hcd);
264 platform_set_drvdata(pdev, NULL);
265
266 clk_disable(priv->usbclk);
267 clk_put(priv->usbclk);
268 if (priv->ahbclk) {
269 clk_disable(priv->ahbclk);
270 clk_put(priv->ahbclk);
271 }
272
273 kfree(priv);
274
275 return 0;
276}
277
278static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
279{
280 struct ehci_mxc_priv *priv = platform_get_drvdata(pdev);
281 struct usb_hcd *hcd = priv->hcd;
282
283 if (hcd->driver->shutdown)
284 hcd->driver->shutdown(hcd);
285}
286
287MODULE_ALIAS("platform:mxc-ehci");
288
289static struct platform_driver ehci_mxc_driver = {
290 .probe = ehci_mxc_drv_probe,
291 .remove = __exit_p(ehci_mxc_drv_remove),
292 .shutdown = ehci_mxc_drv_shutdown,
293 .driver = {
294 .name = "mxc-ehci",
295 },
296};
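
This driver binds by platform device name, so a board file would register a
"mxc-ehci" device alongside its platform data. A minimal sketch; the
register window, IRQ number and pdata contents are placeholders for whatever
the board actually uses:

#include <linux/platform_device.h>
#include <mach/mxc_ehci.h>

static struct mxc_usbh_platform_data usbh1_pdata;	/* filled in by the board */

static struct resource mxc_usbh1_resources[] = {
	{
		.start	= 0x43f88200,			/* placeholder base */
		.end	= 0x43f883ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 35,				/* placeholder IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mxc_usbh1_device = {
	.name		= "mxc-ehci",	/* matches ehci_mxc_driver.driver.name */
	.id		= 1,		/* pdev->id, passed to mxc_set_usbcontrol() */
	.resource	= mxc_usbh1_resources,
	.num_resources	= ARRAY_SIZE(mxc_usbh1_resources),
	.dev = {
		.platform_data = &usbh1_pdata,
	},
};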
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
new file mode 100644
index 000000000000..12f1ad2fd0e8
--- /dev/null
+++ b/drivers/usb/host/ehci-omap.c
@@ -0,0 +1,756 @@
1/*
2 * ehci-omap.c - driver for USBHOST on OMAP 34xx processor
3 *
4 * Bus Glue for OMAP34xx USBHOST 3 port EHCI controller
5 * Tested on OMAP3430 ES2.0 SDP
6 *
7 * Copyright (C) 2007-2008 Texas Instruments, Inc.
8 * Author: Vikram Pandita <vikram.pandita@ti.com>
9 *
10 * Copyright (C) 2009 Nokia Corporation
11 * Contact: Felipe Balbi <felipe.balbi@nokia.com>
12 *
13 * Based on "ehci-fsl.c" and "ehci-au1xxx.c" ehci glue layers
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 *
29 * TODO (last updated Feb 23rd, 2009):
30 * - add kernel-doc
31 * - enable AUTOIDLE
32 * - move DPLL5 programming to clock fw
33 * - add suspend/resume
34 * - move workarounds to board-files
35 */
36
37#include <linux/platform_device.h>
38#include <linux/clk.h>
39#include <linux/gpio.h>
40#include <mach/usb.h>
41
42/*
43 * OMAP USBHOST Register addresses: VIRTUAL ADDRESSES
44 * Use ehci_omap_readl()/ehci_omap_writel() functions
45 */
46
47/* TLL Register Set */
48#define OMAP_USBTLL_REVISION (0x00)
49#define OMAP_USBTLL_SYSCONFIG (0x10)
50#define OMAP_USBTLL_SYSCONFIG_CACTIVITY (1 << 8)
51#define OMAP_USBTLL_SYSCONFIG_SIDLEMODE (1 << 3)
52#define OMAP_USBTLL_SYSCONFIG_ENAWAKEUP (1 << 2)
53#define OMAP_USBTLL_SYSCONFIG_SOFTRESET (1 << 1)
54#define OMAP_USBTLL_SYSCONFIG_AUTOIDLE (1 << 0)
55
56#define OMAP_USBTLL_SYSSTATUS (0x14)
57#define OMAP_USBTLL_SYSSTATUS_RESETDONE (1 << 0)
58
59#define OMAP_USBTLL_IRQSTATUS (0x18)
60#define OMAP_USBTLL_IRQENABLE (0x1C)
61
62#define OMAP_TLL_SHARED_CONF (0x30)
63#define OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN (1 << 6)
64#define OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN (1 << 5)
65#define OMAP_TLL_SHARED_CONF_USB_DIVRATION (1 << 2)
66#define OMAP_TLL_SHARED_CONF_FCLK_REQ (1 << 1)
67#define OMAP_TLL_SHARED_CONF_FCLK_IS_ON (1 << 0)
68
69#define OMAP_TLL_CHANNEL_CONF(num) (0x040 + 0x004 * num)
70#define OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF (1 << 11)
71#define OMAP_TLL_CHANNEL_CONF_ULPI_ULPIAUTOIDLE (1 << 10)
72#define OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE (1 << 9)
73#define OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE (1 << 8)
74#define OMAP_TLL_CHANNEL_CONF_CHANEN (1 << 0)
75
76#define OMAP_TLL_ULPI_FUNCTION_CTRL(num) (0x804 + 0x100 * num)
77#define OMAP_TLL_ULPI_INTERFACE_CTRL(num) (0x807 + 0x100 * num)
78#define OMAP_TLL_ULPI_OTG_CTRL(num) (0x80A + 0x100 * num)
79#define OMAP_TLL_ULPI_INT_EN_RISE(num) (0x80D + 0x100 * num)
80#define OMAP_TLL_ULPI_INT_EN_FALL(num) (0x810 + 0x100 * num)
81#define OMAP_TLL_ULPI_INT_STATUS(num) (0x813 + 0x100 * num)
82#define OMAP_TLL_ULPI_INT_LATCH(num) (0x814 + 0x100 * num)
83#define OMAP_TLL_ULPI_DEBUG(num) (0x815 + 0x100 * num)
84#define OMAP_TLL_ULPI_SCRATCH_REGISTER(num) (0x816 + 0x100 * num)
85
86#define OMAP_TLL_CHANNEL_COUNT 3
87#define OMAP_TLL_CHANNEL_1_EN_MASK (1 << 1)
88#define OMAP_TLL_CHANNEL_2_EN_MASK (1 << 2)
89#define OMAP_TLL_CHANNEL_3_EN_MASK (1 << 4)
90
91/* UHH Register Set */
92#define OMAP_UHH_REVISION (0x00)
93#define OMAP_UHH_SYSCONFIG (0x10)
94#define OMAP_UHH_SYSCONFIG_MIDLEMODE (1 << 12)
95#define OMAP_UHH_SYSCONFIG_CACTIVITY (1 << 8)
96#define OMAP_UHH_SYSCONFIG_SIDLEMODE (1 << 3)
97#define OMAP_UHH_SYSCONFIG_ENAWAKEUP (1 << 2)
98#define OMAP_UHH_SYSCONFIG_SOFTRESET (1 << 1)
99#define OMAP_UHH_SYSCONFIG_AUTOIDLE (1 << 0)
100
101#define OMAP_UHH_SYSSTATUS (0x14)
102#define OMAP_UHH_HOSTCONFIG (0x40)
103#define OMAP_UHH_HOSTCONFIG_ULPI_BYPASS (1 << 0)
104#define OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS (1 << 0)
105#define OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS (1 << 11)
106#define OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS (1 << 12)
107#define OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN (1 << 2)
108#define OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN (1 << 3)
109#define OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN (1 << 4)
110#define OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN (1 << 5)
111#define OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS (1 << 8)
112#define OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS (1 << 9)
113#define OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS (1 << 10)
114
115#define OMAP_UHH_DEBUG_CSR (0x44)
116
117/* EHCI Register Set */
118#define EHCI_INSNREG05_ULPI (0xA4)
119#define EHCI_INSNREG05_ULPI_CONTROL_SHIFT 31
120#define EHCI_INSNREG05_ULPI_PORTSEL_SHIFT 24
121#define EHCI_INSNREG05_ULPI_OPSEL_SHIFT 22
122#define EHCI_INSNREG05_ULPI_REGADD_SHIFT 16
123#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
124#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
125
126/*-------------------------------------------------------------------------*/
127
128static inline void ehci_omap_writel(void __iomem *base, u32 reg, u32 val)
129{
130 __raw_writel(val, base + reg);
131}
132
133static inline u32 ehci_omap_readl(void __iomem *base, u32 reg)
134{
135 return __raw_readl(base + reg);
136}
137
138static inline void ehci_omap_writeb(void __iomem *base, u8 reg, u8 val)
139{
140 __raw_writeb(val, base + reg);
141}
142
143static inline u8 ehci_omap_readb(void __iomem *base, u8 reg)
144{
145 return __raw_readb(base + reg);
146}
147
148/*-------------------------------------------------------------------------*/
149
150struct ehci_hcd_omap {
151 struct ehci_hcd *ehci;
152 struct device *dev;
153
154 struct clk *usbhost_ick;
155 struct clk *usbhost2_120m_fck;
156 struct clk *usbhost1_48m_fck;
157 struct clk *usbtll_fck;
158 struct clk *usbtll_ick;
159
160 /* FIXME the following two workarounds are
161	 * board-specific, not silicon-specific, so they
162	 * should be moved to the board files instead.
163	 *
164	 * Maybe someone from TI will know better which
165	 * boards are affected and need the workarounds
166	 * to be applied.
167 */
168
169 /* gpio for resetting phy */
170 int reset_gpio_port[OMAP3_HS_USB_PORTS];
171
172 /* phy reset workaround */
173 int phy_reset;
174
175 /* desired phy_mode: TLL, PHY */
176 enum ehci_hcd_omap_mode port_mode[OMAP3_HS_USB_PORTS];
177
178 void __iomem *uhh_base;
179 void __iomem *tll_base;
180 void __iomem *ehci_base;
181};
182
183/*-------------------------------------------------------------------------*/
184
185static void omap_usb_utmi_init(struct ehci_hcd_omap *omap, u8 tll_channel_mask)
186{
187 unsigned reg;
188 int i;
189
190 /* Program the 3 TLL channels upfront */
191 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
192 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
193
194 /* Disable AutoIdle, BitStuffing and use SDR Mode */
195 reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
196 | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
197 | OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
198 ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
199 }
200
201 /* Program Common TLL register */
202 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_SHARED_CONF);
203 reg |= (OMAP_TLL_SHARED_CONF_FCLK_IS_ON
204 | OMAP_TLL_SHARED_CONF_USB_DIVRATION
205 | OMAP_TLL_SHARED_CONF_USB_180D_SDR_EN);
206 reg &= ~OMAP_TLL_SHARED_CONF_USB_90D_DDR_EN;
207
208 ehci_omap_writel(omap->tll_base, OMAP_TLL_SHARED_CONF, reg);
209
210 /* Enable channels now */
211 for (i = 0; i < OMAP_TLL_CHANNEL_COUNT; i++) {
212 reg = ehci_omap_readl(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i));
213
214 /* Enable only the reg that is needed */
215 if (!(tll_channel_mask & 1<<i))
216 continue;
217
218 reg |= OMAP_TLL_CHANNEL_CONF_CHANEN;
219 ehci_omap_writel(omap->tll_base, OMAP_TLL_CHANNEL_CONF(i), reg);
220
221 ehci_omap_writeb(omap->tll_base,
222 OMAP_TLL_ULPI_SCRATCH_REGISTER(i), 0xbe);
223 dev_dbg(omap->dev, "ULPI_SCRATCH_REG[ch=%d]= 0x%02x\n",
224 i+1, ehci_omap_readb(omap->tll_base,
225 OMAP_TLL_ULPI_SCRATCH_REGISTER(i)));
226 }
227}
228
229/*-------------------------------------------------------------------------*/
230
231/* omap_start_ehc
232 * - Start the TI USBHOST controller
233 */
234static int omap_start_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
235{
236 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
237 u8 tll_ch_mask = 0;
238 unsigned reg = 0;
239 int ret = 0;
240
241 dev_dbg(omap->dev, "starting TI EHCI USB Controller\n");
242
243 /* Enable Clocks for USBHOST */
244 omap->usbhost_ick = clk_get(omap->dev, "usbhost_ick");
245 if (IS_ERR(omap->usbhost_ick)) {
246 ret = PTR_ERR(omap->usbhost_ick);
247 goto err_host_ick;
248 }
249 clk_enable(omap->usbhost_ick);
250
251 omap->usbhost2_120m_fck = clk_get(omap->dev, "usbhost_120m_fck");
252 if (IS_ERR(omap->usbhost2_120m_fck)) {
253 ret = PTR_ERR(omap->usbhost2_120m_fck);
254 goto err_host_120m_fck;
255 }
256 clk_enable(omap->usbhost2_120m_fck);
257
258 omap->usbhost1_48m_fck = clk_get(omap->dev, "usbhost_48m_fck");
259 if (IS_ERR(omap->usbhost1_48m_fck)) {
260 ret = PTR_ERR(omap->usbhost1_48m_fck);
261 goto err_host_48m_fck;
262 }
263 clk_enable(omap->usbhost1_48m_fck);
264
265 if (omap->phy_reset) {
266 /* Refer: ISSUE1 */
267 if (gpio_is_valid(omap->reset_gpio_port[0])) {
268 gpio_request(omap->reset_gpio_port[0],
269 "USB1 PHY reset");
270 gpio_direction_output(omap->reset_gpio_port[0], 0);
271 }
272
273 if (gpio_is_valid(omap->reset_gpio_port[1])) {
274 gpio_request(omap->reset_gpio_port[1],
275 "USB2 PHY reset");
276 gpio_direction_output(omap->reset_gpio_port[1], 0);
277 }
278
279 /* Hold the PHY in RESET for enough time till DIR is high */
280 udelay(10);
281 }
282
283 /* Configure TLL for 60Mhz clk for ULPI */
284 omap->usbtll_fck = clk_get(omap->dev, "usbtll_fck");
285 if (IS_ERR(omap->usbtll_fck)) {
286 ret = PTR_ERR(omap->usbtll_fck);
287 goto err_tll_fck;
288 }
289 clk_enable(omap->usbtll_fck);
290
291 omap->usbtll_ick = clk_get(omap->dev, "usbtll_ick");
292 if (IS_ERR(omap->usbtll_ick)) {
293 ret = PTR_ERR(omap->usbtll_ick);
294 goto err_tll_ick;
295 }
296 clk_enable(omap->usbtll_ick);
297
298 /* perform TLL soft reset, and wait until reset is complete */
299 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
300 OMAP_USBTLL_SYSCONFIG_SOFTRESET);
301
302 /* Wait for TLL reset to complete */
303 while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
304 & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
305 cpu_relax();
306
307 if (time_after(jiffies, timeout)) {
308 dev_dbg(omap->dev, "operation timed out\n");
309 ret = -EINVAL;
310 goto err_sys_status;
311 }
312 }
313
314 dev_dbg(omap->dev, "TLL RESET DONE\n");
315
316 /* (1<<3) = no idle mode only for initial debugging */
317 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
318 OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
319 OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
320 OMAP_USBTLL_SYSCONFIG_CACTIVITY);
321
322
323 /* Put UHH in NoIdle/NoStandby mode */
324 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSCONFIG);
325 reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
326 | OMAP_UHH_SYSCONFIG_SIDLEMODE
327 | OMAP_UHH_SYSCONFIG_CACTIVITY
328 | OMAP_UHH_SYSCONFIG_MIDLEMODE);
329 reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
330
331 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
332
333 reg = ehci_omap_readl(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
334
335 /* setup ULPI bypass and burst configurations */
336 reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
337 | OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
338 | OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
339 reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;
340
341 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_UNKNOWN)
342 reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
343 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_UNKNOWN)
344 reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
345 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_UNKNOWN)
346 reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;
347
348 /* Bypass the TLL module for PHY mode operation */
349 if (omap_rev() <= OMAP3430_REV_ES2_1) {
350		dev_dbg(omap->dev, "OMAP3 ES version <= ES2.1\n");
351 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY) ||
352 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY) ||
353 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY))
354 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
355 else
356 reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
357 } else {
358 dev_dbg(omap->dev, "OMAP3 ES version > ES2.1\n");
359 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_PHY)
360 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
361 else if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
362 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
363
364 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_PHY)
365 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
366 else if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
367 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
368
369 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_PHY)
370 reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
371 else if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
372 reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
373
374 }
375 ehci_omap_writel(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
376 dev_dbg(omap->dev, "UHH setup done, uhh_hostconfig=%x\n", reg);
377
378
379 if ((omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL) ||
380 (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL) ||
381 (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)) {
382
383 if (omap->port_mode[0] == EHCI_HCD_OMAP_MODE_TLL)
384 tll_ch_mask |= OMAP_TLL_CHANNEL_1_EN_MASK;
385 if (omap->port_mode[1] == EHCI_HCD_OMAP_MODE_TLL)
386 tll_ch_mask |= OMAP_TLL_CHANNEL_2_EN_MASK;
387 if (omap->port_mode[2] == EHCI_HCD_OMAP_MODE_TLL)
388 tll_ch_mask |= OMAP_TLL_CHANNEL_3_EN_MASK;
389
390 /* Enable UTMI mode for required TLL channels */
391 omap_usb_utmi_init(omap, tll_ch_mask);
392 }
393
394 if (omap->phy_reset) {
395 /* Refer ISSUE1:
396 * Hold the PHY in RESET for enough time till
397 * PHY is settled and ready
398 */
399 udelay(10);
400
401 if (gpio_is_valid(omap->reset_gpio_port[0]))
402 gpio_set_value(omap->reset_gpio_port[0], 1);
403
404 if (gpio_is_valid(omap->reset_gpio_port[1]))
405 gpio_set_value(omap->reset_gpio_port[1], 1);
406 }
407
408 return 0;
409
410err_sys_status:
411 clk_disable(omap->usbtll_ick);
412 clk_put(omap->usbtll_ick);
413
414err_tll_ick:
415 clk_disable(omap->usbtll_fck);
416 clk_put(omap->usbtll_fck);
417
418err_tll_fck:
419 clk_disable(omap->usbhost1_48m_fck);
420 clk_put(omap->usbhost1_48m_fck);
421
422 if (omap->phy_reset) {
423 if (gpio_is_valid(omap->reset_gpio_port[0]))
424 gpio_free(omap->reset_gpio_port[0]);
425
426 if (gpio_is_valid(omap->reset_gpio_port[1]))
427 gpio_free(omap->reset_gpio_port[1]);
428 }
429
430err_host_48m_fck:
431 clk_disable(omap->usbhost2_120m_fck);
432 clk_put(omap->usbhost2_120m_fck);
433
434err_host_120m_fck:
435 clk_disable(omap->usbhost_ick);
436 clk_put(omap->usbhost_ick);
437
438err_host_ick:
439 return ret;
440}
441
442static void omap_stop_ehc(struct ehci_hcd_omap *omap, struct usb_hcd *hcd)
443{
444 unsigned long timeout = jiffies + msecs_to_jiffies(100);
445
446 dev_dbg(omap->dev, "stopping TI EHCI USB Controller\n");
447
448 /* Reset OMAP modules for insmod/rmmod to work */
449 ehci_omap_writel(omap->uhh_base, OMAP_UHH_SYSCONFIG,
450 OMAP_UHH_SYSCONFIG_SOFTRESET);
451 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
452 & (1 << 0))) {
453 cpu_relax();
454
455 if (time_after(jiffies, timeout))
456 dev_dbg(omap->dev, "operation timed out\n");
457 }
458
459 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
460 & (1 << 1))) {
461 cpu_relax();
462
463 if (time_after(jiffies, timeout))
464 dev_dbg(omap->dev, "operation timed out\n");
465 }
466
467 while (!(ehci_omap_readl(omap->uhh_base, OMAP_UHH_SYSSTATUS)
468 & (1 << 2))) {
469 cpu_relax();
470
471 if (time_after(jiffies, timeout))
472 dev_dbg(omap->dev, "operation timed out\n");
473 }
474
475 ehci_omap_writel(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
476
477 while (!(ehci_omap_readl(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
478 & (1 << 0))) {
479 cpu_relax();
480
481 if (time_after(jiffies, timeout))
482 dev_dbg(omap->dev, "operation timed out\n");
483 }
484
485 if (omap->usbtll_fck != NULL) {
486 clk_disable(omap->usbtll_fck);
487 clk_put(omap->usbtll_fck);
488 omap->usbtll_fck = NULL;
489 }
490
491 if (omap->usbhost_ick != NULL) {
492 clk_disable(omap->usbhost_ick);
493 clk_put(omap->usbhost_ick);
494 omap->usbhost_ick = NULL;
495 }
496
497 if (omap->usbhost1_48m_fck != NULL) {
498 clk_disable(omap->usbhost1_48m_fck);
499 clk_put(omap->usbhost1_48m_fck);
500 omap->usbhost1_48m_fck = NULL;
501 }
502
503 if (omap->usbhost2_120m_fck != NULL) {
504 clk_disable(omap->usbhost2_120m_fck);
505 clk_put(omap->usbhost2_120m_fck);
506 omap->usbhost2_120m_fck = NULL;
507 }
508
509 if (omap->usbtll_ick != NULL) {
510 clk_disable(omap->usbtll_ick);
511 clk_put(omap->usbtll_ick);
512 omap->usbtll_ick = NULL;
513 }
514
515 if (omap->phy_reset) {
516 if (gpio_is_valid(omap->reset_gpio_port[0]))
517 gpio_free(omap->reset_gpio_port[0]);
518
519 if (gpio_is_valid(omap->reset_gpio_port[1]))
520 gpio_free(omap->reset_gpio_port[1]);
521 }
522
523 dev_dbg(omap->dev, "Clock to USB host has been disabled\n");
524}
525
526/*-------------------------------------------------------------------------*/
527
528static const struct hc_driver ehci_omap_hc_driver;
529
530/* configure so an HC device and id are always provided */
531/* always called with process context; sleeping is OK */
532
533/**
534 * ehci_hcd_omap_probe - initialize TI-based HCDs
535 *
536 * Allocates basic resources for this USB host controller, and
537 * then invokes the start() method for the HCD associated with it
538 * through the hotplug entry's driver_data.
539 */
540static int ehci_hcd_omap_probe(struct platform_device *pdev)
541{
542 struct ehci_hcd_omap_platform_data *pdata = pdev->dev.platform_data;
543 struct ehci_hcd_omap *omap;
544 struct resource *res;
545 struct usb_hcd *hcd;
546
547 int irq = platform_get_irq(pdev, 0);
548 int ret = -ENODEV;
549
550 if (!pdata) {
551 dev_dbg(&pdev->dev, "missing platform_data\n");
552 goto err_pdata;
553 }
554
555 if (usb_disabled())
556 goto err_disabled;
557
558 omap = kzalloc(sizeof(*omap), GFP_KERNEL);
559 if (!omap) {
560 ret = -ENOMEM;
561 goto err_disabled;
562 }
563
564 hcd = usb_create_hcd(&ehci_omap_hc_driver, &pdev->dev,
565 dev_name(&pdev->dev));
566 if (!hcd) {
567 dev_dbg(&pdev->dev, "failed to create hcd with err %d\n", ret);
568 ret = -ENOMEM;
569 goto err_create_hcd;
570 }
571
572 platform_set_drvdata(pdev, omap);
573 omap->dev = &pdev->dev;
574 omap->phy_reset = pdata->phy_reset;
575 omap->reset_gpio_port[0] = pdata->reset_gpio_port[0];
576 omap->reset_gpio_port[1] = pdata->reset_gpio_port[1];
577 omap->reset_gpio_port[2] = pdata->reset_gpio_port[2];
578 omap->port_mode[0] = pdata->port_mode[0];
579 omap->port_mode[1] = pdata->port_mode[1];
580 omap->port_mode[2] = pdata->port_mode[2];
581 omap->ehci = hcd_to_ehci(hcd);
582 omap->ehci->sbrn = 0x20;
583
584 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
585
586 hcd->rsrc_start = res->start;
587 hcd->rsrc_len = resource_size(res);
588
589 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
590 if (!hcd->regs) {
591 dev_err(&pdev->dev, "EHCI ioremap failed\n");
592 ret = -ENOMEM;
593 goto err_ioremap;
594 }
595
596 /* we know this is the memory we want, no need to ioremap again */
597 omap->ehci->caps = hcd->regs;
598 omap->ehci_base = hcd->regs;
599
600 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
601 omap->uhh_base = ioremap(res->start, resource_size(res));
602 if (!omap->uhh_base) {
603 dev_err(&pdev->dev, "UHH ioremap failed\n");
604 ret = -ENOMEM;
605 goto err_uhh_ioremap;
606 }
607
608 res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
609 omap->tll_base = ioremap(res->start, resource_size(res));
610 if (!omap->tll_base) {
611 dev_err(&pdev->dev, "TLL ioremap failed\n");
612 ret = -ENOMEM;
613 goto err_tll_ioremap;
614 }
615
616 ret = omap_start_ehc(omap, hcd);
617 if (ret) {
618 dev_dbg(&pdev->dev, "failed to start ehci\n");
619 goto err_start;
620 }
621
622 omap->ehci->regs = hcd->regs
623 + HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
624
625 /* cache this readonly data; minimize chip reads */
626 omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
627
628 /* SET 1 micro-frame Interrupt interval */
629 writel(readl(&omap->ehci->regs->command) | (1 << 16),
630 &omap->ehci->regs->command);
631
632 ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
633 if (ret) {
634 dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
635 goto err_add_hcd;
636 }
637
638 return 0;
639
640err_add_hcd:
641 omap_stop_ehc(omap, hcd);
642
643err_start:
644 iounmap(omap->tll_base);
645
646err_tll_ioremap:
647 iounmap(omap->uhh_base);
648
649err_uhh_ioremap:
650 iounmap(hcd->regs);
651
652err_ioremap:
653 usb_put_hcd(hcd);
654
655err_create_hcd:
656 kfree(omap);
657err_disabled:
658err_pdata:
659 return ret;
660}
661
662/* may be called without controller electrically present */
663/* may be called with controller, bus, and devices active */
664
665/**
666 * ehci_hcd_omap_remove - shutdown processing for EHCI HCDs
667 * @pdev: USB Host Controller being removed
668 *
669 * Reverses the effect of ehci_hcd_omap_probe(), first invoking
670 * the HCD's stop() method. It is always called from a thread
671 * context, normally "rmmod", "apmd", or something similar.
672 */
673static int ehci_hcd_omap_remove(struct platform_device *pdev)
674{
675 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
676 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
677
678 usb_remove_hcd(hcd);
679 omap_stop_ehc(omap, hcd);
680 iounmap(hcd->regs);
681 iounmap(omap->tll_base);
682 iounmap(omap->uhh_base);
683 usb_put_hcd(hcd);
684
685 return 0;
686}
687
688static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
689{
690 struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
691 struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
692
693 if (hcd->driver->shutdown)
694 hcd->driver->shutdown(hcd);
695}
696
697static struct platform_driver ehci_hcd_omap_driver = {
698 .probe = ehci_hcd_omap_probe,
699 .remove = ehci_hcd_omap_remove,
700 .shutdown = ehci_hcd_omap_shutdown,
701 /*.suspend = ehci_hcd_omap_suspend, */
702 /*.resume = ehci_hcd_omap_resume, */
703 .driver = {
704 .name = "ehci-omap",
705 }
706};
707
708/*-------------------------------------------------------------------------*/
709
710static const struct hc_driver ehci_omap_hc_driver = {
711 .description = hcd_name,
712 .product_desc = "OMAP-EHCI Host Controller",
713 .hcd_priv_size = sizeof(struct ehci_hcd),
714
715 /*
716 * generic hardware linkage
717 */
718 .irq = ehci_irq,
719 .flags = HCD_MEMORY | HCD_USB2,
720
721 /*
722 * basic lifecycle operations
723 */
724 .reset = ehci_init,
725 .start = ehci_run,
726 .stop = ehci_stop,
727 .shutdown = ehci_shutdown,
728
729 /*
730 * managing i/o requests and associated device resources
731 */
732 .urb_enqueue = ehci_urb_enqueue,
733 .urb_dequeue = ehci_urb_dequeue,
734 .endpoint_disable = ehci_endpoint_disable,
735 .endpoint_reset = ehci_endpoint_reset,
736
737 /*
738 * scheduling support
739 */
740 .get_frame_number = ehci_get_frame,
741
742 /*
743 * root hub support
744 */
745 .hub_status_data = ehci_hub_status_data,
746 .hub_control = ehci_hub_control,
747 .bus_suspend = ehci_bus_suspend,
748 .bus_resume = ehci_bus_resume,
749
750 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
751};
752
753MODULE_ALIAS("platform:omap-ehci");
754MODULE_AUTHOR("Texas Instruments, Inc.");
755MODULE_AUTHOR("Felipe Balbi <felipe.balbi@nokia.com>");
756
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 139a2cc3f641..a427d3b00634 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -616,9 +616,11 @@ qh_urb_transaction (
 ) {
 	struct ehci_qtd		*qtd, *qtd_prev;
 	dma_addr_t		buf;
-	int			len, maxpacket;
+	int			len, this_sg_len, maxpacket;
 	int			is_input;
 	u32			token;
+	int			i;
+	struct scatterlist	*sg;
 
 	/*
 	 * URBs map to sequences of QTDs:  one logical transaction
@@ -659,7 +661,20 @@ qh_urb_transaction (
 	/*
 	 * data transfer stage:  buffer setup
 	 */
-	buf = urb->transfer_dma;
+	i = urb->num_sgs;
+	if (len > 0 && i > 0) {
+		sg = urb->sg->sg;
+		buf = sg_dma_address(sg);
+
+		/* urb->transfer_buffer_length may be smaller than the
+		 * size of the scatterlist (or vice versa)
+		 */
+		this_sg_len = min_t(int, sg_dma_len(sg), len);
+	} else {
+		sg = NULL;
+		buf = urb->transfer_dma;
+		this_sg_len = len;
+	}
 
 	if (is_input)
 		token |= (1 /* "in" */ << 8);
@@ -675,7 +690,9 @@ qh_urb_transaction (
 	for (;;) {
 		int this_qtd_len;
 
-		this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+		this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token,
+				maxpacket);
+		this_sg_len -= this_qtd_len;
 		len -= this_qtd_len;
 		buf += this_qtd_len;
 
@@ -691,8 +708,13 @@ qh_urb_transaction (
 		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
 			token ^= QTD_TOGGLE;
 
-		if (likely (len <= 0))
-			break;
+		if (likely(this_sg_len <= 0)) {
+			if (--i <= 0 || len <= 0)
+				break;
+			sg = sg_next(sg);
+			buf = sg_dma_address(sg);
+			this_sg_len = min_t(int, sg_dma_len(sg), len);
+		}
 
 		qtd_prev = qtd;
 		qtd = ehci_qtd_alloc (ehci, flags);
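
Taken together, the hunks above teach qh_urb_transaction() to walk a
DMA-mapped scatterlist instead of assuming one contiguous buffer. The
control flow reduces to this sketch (names as in the patch, qTD allocation
and chaining elided):

/* Sketch only: consume each sg segment, switching segments whenever the
 * current one is exhausted before the URB's byte count is. */
for (;;) {
	this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, maxpacket);
	this_sg_len -= this_qtd_len;
	len -= this_qtd_len;
	buf += this_qtd_len;

	if (this_sg_len <= 0) {
		if (--i <= 0 || len <= 0)
			break;			/* out of segments or bytes */
		sg = sg_next(sg);		/* next DMA segment */
		buf = sg_dma_address(sg);
		this_sg_len = min_t(int, sg_dma_len(sg), len);
	}
	/* ... allocate the next qtd and chain it here ... */
}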
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a5535b5e3fe2..1e391e624c8a 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1385,7 +1385,7 @@ sitd_slot_ok (
  * given EHCI_TUNE_FLS and the slop).  Or, write a smarter scheduler!
  */
 
-#define SCHEDULE_SLOP	10	/* frames */
+#define SCHEDULE_SLOP	80	/* microframes */
 
 static int
 iso_stream_schedule (
@@ -1394,12 +1394,13 @@ iso_stream_schedule (
 	struct ehci_iso_stream	*stream
 )
 {
-	u32			now, start, max, period;
+	u32			now, next, start, period;
 	int			status;
 	unsigned		mod = ehci->periodic_size << 3;
 	struct ehci_iso_sched	*sched = urb->hcpriv;
+	struct pci_dev		*pdev;
 
-	if (sched->span > (mod - 8 * SCHEDULE_SLOP)) {
+	if (sched->span > (mod - SCHEDULE_SLOP)) {
 		ehci_dbg (ehci, "iso request %p too long\n", urb);
 		status = -EFBIG;
 		goto fail;
@@ -1418,26 +1419,35 @@ iso_stream_schedule (
 
 	now = ehci_readl(ehci, &ehci->regs->frame_index) % mod;
 
-	/* when's the last uframe this urb could start? */
-	max = now + mod;
-
 	/* Typical case: reuse current schedule, stream is still active.
 	 * Hopefully there are no gaps from the host falling behind
 	 * (irq delays etc), but if there are we'll take the next
 	 * slot in the schedule, implicitly assuming URB_ISO_ASAP.
 	 */
 	if (likely (!list_empty (&stream->td_list))) {
+		pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
 		start = stream->next_uframe;
-		if (start < now)
-			start += mod;
+
+		/* For high speed devices, allow scheduling within the
+		 * isochronous scheduling threshold.  For full speed devices,
+		 * don't.  (Work around for Intel ICH9 bug.)
+		 */
+		if (!stream->highspeed &&
+				pdev->vendor == PCI_VENDOR_ID_INTEL)
+			next = now + ehci->i_thresh;
+		else
+			next = now;
 
 		/* Fell behind (by up to twice the slop amount)? */
-		if (start >= max - 2 * 8 * SCHEDULE_SLOP)
+		if (((start - next) & (mod - 1)) >=
+				mod - 2 * SCHEDULE_SLOP)
 			start += period * DIV_ROUND_UP(
-					max - start, period) - mod;
+					(next - start) & (mod - 1),
+					period);
 
 		/* Tried to schedule too far into the future? */
-		if (unlikely((start + sched->span) >= max)) {
+		if (unlikely(((start - now) & (mod - 1)) + sched->span
+				>= mod - 2 * SCHEDULE_SLOP)) {
 			status = -EFBIG;
 			goto fail;
 		}
@@ -1451,7 +1461,7 @@ iso_stream_schedule (
 	 * can also help high bandwidth if the dma and irq loads don't
 	 * jump until after the queue is primed.
 	 */
-	start = SCHEDULE_SLOP * 8 + (now & ~0x07);
+	start = SCHEDULE_SLOP + (now & ~0x07);
 	start %= mod;
 	stream->next_uframe = start;
 
@@ -1482,7 +1492,7 @@ iso_stream_schedule (
 	/* no room in the schedule */
 	ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n",
 			list_empty (&stream->td_list) ? "" : "re",
-			urb, now, max);
+			urb, now, now + mod);
 	status = -ENOSPC;
 
 fail:
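
The (a - b) & (mod - 1) expressions above compute forward distances modulo
mod (periodic_size << 3, a power of two), which stays correct across
frame-counter wraparound. A worked example with the default mod of 8192:

/* now = 8100, start = 50: the naive "start < now" test would call this
 * stream late, but
 *	(start - now) & (mod - 1) == (50 - 8100) & 8191 == 142
 * i.e. start is really 142 uframes in the future.
 */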
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
new file mode 100644
index 000000000000..a5861531ad3e
--- /dev/null
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -0,0 +1,300 @@
1/*
2 * EHCI HCD (Host Controller Driver) for USB.
3 *
4 * Bus Glue for Xilinx EHCI core on the of_platform bus
5 *
6 * Copyright (c) 2009 Xilinx, Inc.
7 *
8 * Based on "ehci-ppc-of.c" by Valentine Barshak <vbarshak@ru.mvista.com>
9 * and "ehci-ppc-soc.c" by Stefan Roese <sr@denx.de>
10 * and "ohci-ppc-of.c" by Sylvain Munaut <tnt@246tNt.com>
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
19 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software Foundation,
24 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 */
27
28#include <linux/signal.h>
29
30#include <linux/of.h>
31#include <linux/of_platform.h>
32
33/**
34 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
35 * @hcd: Pointer to the usb_hcd device to which the host controller is bound
36 *
37 * called during probe() after chip reset completes.
38 */
39static int ehci_xilinx_of_setup(struct usb_hcd *hcd)
40{
41 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
42 int retval;
43
44 retval = ehci_halt(ehci);
45 if (retval)
46 return retval;
47
48 retval = ehci_init(hcd);
49 if (retval)
50 return retval;
51
52 ehci->sbrn = 0x20;
53
54 return ehci_reset(ehci);
55}
56
57/**
58 * ehci_xilinx_port_handed_over - hand the port out if failed to enable it
59 * @hcd: Pointer to the usb_hcd device to which the host controller is bound
60 * @portnum: Port number to which the device is attached.
61 *
62 * This function is used as a place to tell the user that the Xilinx USB host
63 * controller does not support LS devices, and that in an HS-only
64 * configuration it does not support FS devices either. It is hoped that this
65 * can help a confused user.
66 *
67 * There are cases when the host controller fails to enable the port due to,
68 * for example, insufficient power that can be supplied to the device from
69 * the USB bus. In those cases, the messages printed here are not helpful.
70 */
71static int ehci_xilinx_port_handed_over(struct usb_hcd *hcd, int portnum)
72{
73 dev_warn(hcd->self.controller, "port %d cannot be enabled\n", portnum);
74 if (hcd->has_tt) {
75 dev_warn(hcd->self.controller,
76 "Maybe you have connected a low speed device?\n");
77
78 dev_warn(hcd->self.controller,
79 "We do not support low speed devices\n");
80 } else {
81 dev_warn(hcd->self.controller,
82 "Maybe your device is not a high speed device?\n");
83 dev_warn(hcd->self.controller,
84 "The USB host controller does not support full speed "
85 "nor low speed devices\n");
86 dev_warn(hcd->self.controller,
87 "You can reconfigure the host controller to have "
88 "full speed support\n");
89 }
90
91 return 0;
92}
93
94
95static const struct hc_driver ehci_xilinx_of_hc_driver = {
96 .description = hcd_name,
97 .product_desc = "OF EHCI",
98 .hcd_priv_size = sizeof(struct ehci_hcd),
99
100 /*
101 * generic hardware linkage
102 */
103 .irq = ehci_irq,
104 .flags = HCD_MEMORY | HCD_USB2,
105
106 /*
107 * basic lifecycle operations
108 */
109 .reset = ehci_xilinx_of_setup,
110 .start = ehci_run,
111 .stop = ehci_stop,
112 .shutdown = ehci_shutdown,
113
114 /*
115 * managing i/o requests and associated device resources
116 */
117 .urb_enqueue = ehci_urb_enqueue,
118 .urb_dequeue = ehci_urb_dequeue,
119 .endpoint_disable = ehci_endpoint_disable,
120
121 /*
122 * scheduling support
123 */
124 .get_frame_number = ehci_get_frame,
125
126 /*
127 * root hub support
128 */
129 .hub_status_data = ehci_hub_status_data,
130 .hub_control = ehci_hub_control,
131#ifdef CONFIG_PM
132 .bus_suspend = ehci_bus_suspend,
133 .bus_resume = ehci_bus_resume,
134#endif
135 .relinquish_port = NULL,
136 .port_handed_over = ehci_xilinx_port_handed_over,
137
138 .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
139};
140
141/**
142 * ehci_hcd_xilinx_of_probe - Probe method for the USB host controller
143 * @op: pointer to the of_device to which the host controller is bound
144 * @match: pointer to of_device_id structure, not used
145 *
146 * This function requests resources and sets up appropriate properties for the
147 * host controller. Because the Xilinx USB host controller can be configured
148 * as HS-only or HS/FS, it checks the configuration in the device tree
149 * entry, and sets an appropriate value for hcd->has_tt.
150 */
151static int __devinit
152ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
153{
154 struct device_node *dn = op->node;
155 struct usb_hcd *hcd;
156 struct ehci_hcd *ehci;
157 struct resource res;
158 int irq;
159 int rv;
160 int *value;
161
162 if (usb_disabled())
163 return -ENODEV;
164
165 dev_dbg(&op->dev, "initializing XILINX-OF USB Controller\n");
166
167 rv = of_address_to_resource(dn, 0, &res);
168 if (rv)
169 return rv;
170
171 hcd = usb_create_hcd(&ehci_xilinx_of_hc_driver, &op->dev,
172 "XILINX-OF USB");
173 if (!hcd)
174 return -ENOMEM;
175
176 hcd->rsrc_start = res.start;
177 hcd->rsrc_len = res.end - res.start + 1;
178
179 if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
180 printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
181 rv = -EBUSY;
182 goto err_rmr;
183 }
184
185 irq = irq_of_parse_and_map(dn, 0);
186 if (irq == NO_IRQ) {
187 printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
188 rv = -EBUSY;
189 goto err_irq;
190 }
191
192 hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
193 if (!hcd->regs) {
194 printk(KERN_ERR __FILE__ ": ioremap failed\n");
195 rv = -ENOMEM;
196 goto err_ioremap;
197 }
198
199 ehci = hcd_to_ehci(hcd);
200
201 /* This core always has big-endian register interface and uses
202 * big-endian memory descriptors.
203 */
204 ehci->big_endian_mmio = 1;
205 ehci->big_endian_desc = 1;
206
207 /* Check whether the FS support option is selected in the hardware.
208 */
209 value = (int *)of_get_property(dn, "xlnx,support-usb-fs", NULL);
210 if (value && (*value == 1)) {
211 ehci_dbg(ehci, "USB host controller supports FS devices\n");
212 hcd->has_tt = 1;
213 } else {
214 ehci_dbg(ehci,
215 "USB host controller is HS only\n");
216 hcd->has_tt = 0;
217 }
218
219	/* The debug registers occupy the first 0x100 bytes, so the EHCI
220	 * capability registers start at offset 0x100. */
221 ehci->caps = hcd->regs + 0x100;
222 ehci->regs = hcd->regs + 0x100 +
223 HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
224
225 /* cache this readonly data; minimize chip reads */
226 ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
227
228 rv = usb_add_hcd(hcd, irq, 0);
229 if (rv == 0)
230 return 0;
231
232 iounmap(hcd->regs);
233
234err_ioremap:
235err_irq:
236 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
237err_rmr:
238 usb_put_hcd(hcd);
239
240 return rv;
241}
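
The "xlnx,support-usb-fs" property tested in the probe routine comes from
the device tree. A fragment enabling full-speed support might look like
this (node name, address and interrupt values are illustrative):

/*
 *	usb@82000000 {
 *		compatible = "xlnx,xps-usb-host-1.00.a";
 *		reg = <0x82000000 0x2000>;
 *		interrupts = <5>;
 *		xlnx,support-usb-fs = <1>;
 *	};
 */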
242
243/**
244 * ehci_hcd_xilinx_of_remove - shutdown hcd and release resources
245 * @op: pointer to of_device structure that is to be removed
246 *
247 * Remove the hcd structure, and release resources that have been requested
248 * during probe.
249 */
250static int ehci_hcd_xilinx_of_remove(struct of_device *op)
251{
252 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
253 dev_set_drvdata(&op->dev, NULL);
254
255 dev_dbg(&op->dev, "stopping XILINX-OF USB Controller\n");
256
257 usb_remove_hcd(hcd);
258
259 iounmap(hcd->regs);
260 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
261
262 usb_put_hcd(hcd);
263
264 return 0;
265}
266
267/**
268 * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
269 * @op: pointer to of_device structure that is to be removed
270 *
271 * Properly shut down the hcd by calling the driver's shutdown routine.
272 */
273static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
274{
275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
276
277 if (hcd->driver->shutdown)
278 hcd->driver->shutdown(hcd);
279
280 return 0;
281}
282
283
284static struct of_device_id ehci_hcd_xilinx_of_match[] = {
285 {.compatible = "xlnx,xps-usb-host-1.00.a",},
286 {},
287};
288MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
289
290static struct of_platform_driver ehci_hcd_xilinx_of_driver = {
291 .name = "xilinx-of-ehci",
292 .match_table = ehci_hcd_xilinx_of_match,
293 .probe = ehci_hcd_xilinx_of_probe,
294 .remove = ehci_hcd_xilinx_of_remove,
295 .shutdown = ehci_hcd_xilinx_of_shutdown,
296 .driver = {
297 .name = "xilinx-of-ehci",
298 .owner = THIS_MODULE,
299 },
300};
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 1a253ebf7e50..5151516ea1de 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -534,8 +534,8 @@ struct isp1362_hcd {
 
 	/* periodic schedule: isochronous */
 	struct list_head	isoc;
-	int			istl_flip:1;
-	int			irq_active:1;
+	unsigned int		istl_flip:1;
+	unsigned int		irq_active:1;
 
 	/* Schedules for the current frame */
 	struct isp1362_ep_queue	atl_queue;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 7ccffcbe7b6f..68b83ab70719 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -35,7 +35,7 @@ extern int usb_disabled(void);
35 35
36static void at91_start_clock(void) 36static void at91_start_clock(void)
37{ 37{
38 if (cpu_is_at91sam9261()) 38 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
39 clk_enable(hclk); 39 clk_enable(hclk);
40 clk_enable(iclk); 40 clk_enable(iclk);
41 clk_enable(fclk); 41 clk_enable(fclk);
@@ -46,7 +46,7 @@ static void at91_stop_clock(void)
46{ 46{
47 clk_disable(fclk); 47 clk_disable(fclk);
48 clk_disable(iclk); 48 clk_disable(iclk);
49 if (cpu_is_at91sam9261()) 49 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
50 clk_disable(hclk); 50 clk_disable(hclk);
51 clocked = 0; 51 clocked = 0;
52} 52}
@@ -142,7 +142,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
142 142
143 iclk = clk_get(&pdev->dev, "ohci_clk"); 143 iclk = clk_get(&pdev->dev, "ohci_clk");
144 fclk = clk_get(&pdev->dev, "uhpck"); 144 fclk = clk_get(&pdev->dev, "uhpck");
145 if (cpu_is_at91sam9261()) 145 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
146 hclk = clk_get(&pdev->dev, "hck0"); 146 hclk = clk_get(&pdev->dev, "hck0");
147 147
148 at91_start_hc(pdev); 148 at91_start_hc(pdev);
@@ -155,7 +155,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
155 /* Error handling */ 155 /* Error handling */
156 at91_stop_hc(pdev); 156 at91_stop_hc(pdev);
157 157
158 if (cpu_is_at91sam9261()) 158 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
159 clk_put(hclk); 159 clk_put(hclk);
160 clk_put(fclk); 160 clk_put(fclk);
161 clk_put(iclk); 161 clk_put(iclk);
@@ -192,7 +192,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
192 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 192 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
193 usb_put_hcd(hcd); 193 usb_put_hcd(hcd);
194 194
195 if (cpu_is_at91sam9261()) 195 if (cpu_is_at91sam9261() || cpu_is_at91sam9g10())
196 clk_put(hclk); 196 clk_put(hclk);
197 clk_put(fclk); 197 clk_put(fclk);
198 clk_put(iclk); 198 clk_put(iclk);
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 100bf3d8437c..2769326da42e 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -98,8 +98,8 @@
98#define ISP1301_I2C_INTERRUPT_RISING 0xE 98#define ISP1301_I2C_INTERRUPT_RISING 0xE
99#define ISP1301_I2C_REG_CLEAR_ADDR 1 99#define ISP1301_I2C_REG_CLEAR_ADDR 1
100 100
101struct i2c_driver isp1301_driver; 101static struct i2c_driver isp1301_driver;
102struct i2c_client *isp1301_i2c_client; 102static struct i2c_client *isp1301_i2c_client;
103 103
104extern int usb_disabled(void); 104extern int usb_disabled(void);
105extern int ocpi_enable(void); 105extern int ocpi_enable(void);
@@ -120,12 +120,12 @@ static int isp1301_remove(struct i2c_client *client)
120 return 0; 120 return 0;
121} 121}
122 122
123const struct i2c_device_id isp1301_id[] = { 123static const struct i2c_device_id isp1301_id[] = {
124 { "isp1301_pnx", 0 }, 124 { "isp1301_pnx", 0 },
125 { } 125 { }
126}; 126};
127 127
128struct i2c_driver isp1301_driver = { 128static struct i2c_driver isp1301_driver = {
129 .driver = { 129 .driver = {
130 .name = "isp1301_pnx", 130 .name = "isp1301_pnx",
131 }, 131 },
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index e33d36256350..41dbc70ae752 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -822,8 +822,6 @@ static void force_dequeue(struct r8a66597 *r8a66597, u16 pipenum, u16 address)
822 return; 822 return;
823 823
824 list_for_each_entry_safe(td, next, list, queue) { 824 list_for_each_entry_safe(td, next, list, queue) {
825 if (!td)
826 continue;
827 if (td->address != address) 825 if (td->address != address)
828 continue; 826 continue;
829 827
@@ -2025,8 +2023,6 @@ static struct r8a66597_device *get_r8a66597_device(struct r8a66597 *r8a66597,
2025 struct list_head *list = &r8a66597->child_device; 2023 struct list_head *list = &r8a66597->child_device;
2026 2024
2027 list_for_each_entry(dev, list, device_list) { 2025 list_for_each_entry(dev, list, device_list) {
2028 if (!dev)
2029 continue;
2030 if (dev->usb_address != addr) 2026 if (dev->usb_address != addr)
2031 continue; 2027 continue;
2032 2028
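
The NULL checks deleted above were dead code: list_for_each_entry() derives each cursor from a live list_head with container_of(), so the cursor can never be NULL inside the loop body; the loop simply terminates when it walks back around to the head. A simplified expansion of the iterator (adapted from <linux/list.h>, shown for illustration only):

#define list_for_each_entry(pos, head, member)                           \
	for (pos = container_of((head)->next, typeof(*pos), member);     \
	     &pos->member != (head);                                     \
	     pos = container_of(pos->member.next, typeof(*pos), member))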
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index 2273c815941f..8c1c610c9513 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -31,17 +31,29 @@ struct whc_dbg {
31 31
32void qset_print(struct seq_file *s, struct whc_qset *qset) 32void qset_print(struct seq_file *s, struct whc_qset *qset)
33{ 33{
34 static const char *qh_type[] = {
35 "ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr", };
34 struct whc_std *std; 36 struct whc_std *std;
35 struct urb *urb = NULL; 37 struct urb *urb = NULL;
36 int i; 38 int i;
37 39
38 seq_printf(s, "qset %08x\n", (u32)qset->qset_dma); 40 seq_printf(s, "qset %08x", (u32)qset->qset_dma);
41 if (&qset->list_node == qset->whc->async_list.prev) {
42 seq_printf(s, " (dummy)\n");
43 } else {
44 seq_printf(s, " ep%d%s-%s maxpkt: %d\n",
45 qset->qh.info1 & 0x0f,
46 (qset->qh.info1 >> 4) & 0x1 ? "in" : "out",
47 qh_type[(qset->qh.info1 >> 5) & 0x7],
48 (qset->qh.info1 >> 16) & 0xffff);
49 }
39 seq_printf(s, " -> %08x\n", (u32)qset->qh.link); 50 seq_printf(s, " -> %08x\n", (u32)qset->qh.link);
40 seq_printf(s, " info: %08x %08x %08x\n", 51 seq_printf(s, " info: %08x %08x %08x\n",
41 qset->qh.info1, qset->qh.info2, qset->qh.info3); 52 qset->qh.info1, qset->qh.info2, qset->qh.info3);
42 seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); 53 seq_printf(s, " sts: %04x errs: %d curwin: %08x\n",
54 qset->qh.status, qset->qh.err_count, qset->qh.cur_window);
43 seq_printf(s, " TD: sts: %08x opts: %08x\n", 55 seq_printf(s, " TD: sts: %08x opts: %08x\n",
44 qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); 56 qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
45 57
46 for (i = 0; i < WHCI_QSET_TD_MAX; i++) { 58 for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
47 seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", 59 seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
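
The new debug output decodes qh.info1 as the patch reads it: bits [3:0] are the endpoint number, bit 4 the direction, bits [7:5] the transfer type used to index qh_type[], and bits [31:16] the maximum packet size. A standalone sketch of the same decoding (field layout inferred from this hunk, not quoted from a WHCI specification):

#include <stdint.h>
#include <stdio.h>

static void decode_info1(uint32_t info1)
{
	static const char *qh_type[] = {
		"ctrl", "isoc", "bulk", "intr", "rsvd", "rsvd", "rsvd", "lpintr",
	};

	printf("ep%u%s-%s maxpkt: %u\n",
	       (unsigned)(info1 & 0x0f),
	       (info1 >> 4) & 0x1 ? "in" : "out",
	       qh_type[(info1 >> 5) & 0x7],
	       (unsigned)((info1 >> 16) & 0xffff));
}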
diff --git a/drivers/usb/host/whci/hcd.c b/drivers/usb/host/whci/hcd.c
index 687b622a1612..e0d3401285c8 100644
--- a/drivers/usb/host/whci/hcd.c
+++ b/drivers/usb/host/whci/hcd.c
@@ -250,6 +250,7 @@ static int whc_probe(struct umc_dev *umc)
250 } 250 }
251 251
252 usb_hcd->wireless = 1; 252 usb_hcd->wireless = 1;
253 usb_hcd->self.sg_tablesize = 2048; /* somewhat arbitrary */
253 254
254 wusbhc = usb_hcd_to_wusbhc(usb_hcd); 255 wusbhc = usb_hcd_to_wusbhc(usb_hcd);
255 whc = wusbhc_to_whc(wusbhc); 256 whc = wusbhc_to_whc(wusbhc);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index 1b9dc1571570..7d4204db0f61 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -49,16 +49,19 @@ struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
49 * state 49 * state
50 * @urb: an urb for a transfer to this endpoint 50 * @urb: an urb for a transfer to this endpoint
51 */ 51 */
52static void qset_fill_qh(struct whc_qset *qset, struct urb *urb) 52static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
53{ 53{
54 struct usb_device *usb_dev = urb->dev; 54 struct usb_device *usb_dev = urb->dev;
55 struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
55 struct usb_wireless_ep_comp_descriptor *epcd; 56 struct usb_wireless_ep_comp_descriptor *epcd;
56 bool is_out; 57 bool is_out;
58 uint8_t phy_rate;
57 59
58 is_out = usb_pipeout(urb->pipe); 60 is_out = usb_pipeout(urb->pipe);
59 61
60 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra; 62 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
61 63
64 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
62 if (epcd) { 65 if (epcd) {
63 qset->max_seq = epcd->bMaxSequence; 66 qset->max_seq = epcd->bMaxSequence;
64 qset->max_burst = epcd->bMaxBurst; 67 qset->max_burst = epcd->bMaxBurst;
@@ -67,12 +70,28 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
67 qset->max_burst = 1; 70 qset->max_burst = 1;
68 } 71 }
69 72
73 /*
74 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
75 * the maximum supported by the device for other endpoints
76 * (unless limited by the user).
77 */
78 if (usb_pipecontrol(urb->pipe))
79 phy_rate = UWB_PHY_RATE_53;
80 else {
81 uint16_t phy_rates;
82
83 phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
84 phy_rate = fls(phy_rates) - 1;
85 if (phy_rate > whc->wusbhc.phy_rate)
86 phy_rate = whc->wusbhc.phy_rate;
87 }
88
70 qset->qh.info1 = cpu_to_le32( 89 qset->qh.info1 = cpu_to_le32(
71 QH_INFO1_EP(usb_pipeendpoint(urb->pipe)) 90 QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
72 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN) 91 | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
73 | usb_pipe_to_qh_type(urb->pipe) 92 | usb_pipe_to_qh_type(urb->pipe)
74 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum)) 93 | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
75 | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out)) 94 | QH_INFO1_MAX_PKT_LEN(qset->max_packet)
76 ); 95 );
77 qset->qh.info2 = cpu_to_le32( 96 qset->qh.info2 = cpu_to_le32(
78 QH_INFO2_BURST(qset->max_burst) 97 QH_INFO2_BURST(qset->max_burst)
@@ -86,7 +105,7 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
86 * strength and can presumably guess the Tx power required 105 * strength and can presumably guess the Tx power required
87 * from that? */ 106 * from that? */
88 qset->qh.info3 = cpu_to_le32( 107 qset->qh.info3 = cpu_to_le32(
89 QH_INFO3_TX_RATE_53_3 108 QH_INFO3_TX_RATE(phy_rate)
90 | QH_INFO3_TX_PWR(0) /* 0 == max power */ 109 | QH_INFO3_TX_PWR(0) /* 0 == max power */
91 ); 110 );
92 111
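
The rate selection above relies on wPHYRates being a bitmask with bit N set when PHY rate index N is supported, so fls(phy_rates) - 1 is the index of the fastest supported rate, which is then clamped to the host's configured maximum. A userspace sketch of the same logic (fls16() stands in for the kernel's fls(); like the driver, it assumes the device reports at least one supported rate):

#include <stdint.h>

static int fls16(uint16_t x)
{
	int r = 0;

	while (x) { /* position of the highest set bit, 1-based */
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned int pick_phy_rate(uint16_t phy_rates, unsigned int host_max)
{
	unsigned int rate = fls16(phy_rates) - 1;

	return rate > host_max ? host_max : rate;
}

/* e.g. phy_rates = 0x00ff (rates 0..7 supported), host_max = 4 -> picks 4 */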
@@ -148,7 +167,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
148 167
149 qset->ep = urb->ep; 168 qset->ep = urb->ep;
150 urb->ep->hcpriv = qset; 169 urb->ep->hcpriv = qset;
151 qset_fill_qh(qset, urb); 170 qset_fill_qh(whc, qset, urb);
152 } 171 }
153 return qset; 172 return qset;
154} 173}
@@ -241,6 +260,36 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
241 qset->ntds--; 260 qset->ntds--;
242} 261}
243 262
263static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
264{
265 struct scatterlist *sg;
266 void *bounce;
267 size_t remaining, offset;
268
269 bounce = std->bounce_buf;
270 remaining = std->len;
271
272 sg = std->bounce_sg;
273 offset = std->bounce_offset;
274
275 while (remaining) {
276 size_t len;
277
278 len = min(sg->length - offset, remaining);
279 memcpy(sg_virt(sg) + offset, bounce, len);
280
281 bounce += len;
282 remaining -= len;
283
284 offset += len;
285 if (offset >= sg->length) {
286 sg = sg_next(sg);
287 offset = 0;
288 }
289 }
290
291}
292
244/** 293/**
245 * qset_free_std - remove an sTD and free it. 294 * qset_free_std - remove an sTD and free it.
246 * @whc: the WHCI host controller 295 * @whc: the WHCI host controller
@@ -249,13 +298,29 @@ static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
249void qset_free_std(struct whc *whc, struct whc_std *std) 298void qset_free_std(struct whc *whc, struct whc_std *std)
250{ 299{
251 list_del(&std->list_node); 300 list_del(&std->list_node);
252 if (std->num_pointers) { 301 if (std->bounce_buf) {
253 dma_unmap_single(whc->wusbhc.dev, std->dma_addr, 302 bool is_out = usb_pipeout(std->urb->pipe);
254 std->num_pointers * sizeof(struct whc_page_list_entry), 303 dma_addr_t dma_addr;
255 DMA_TO_DEVICE); 304
305 if (std->num_pointers)
306 dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
307 else
308 dma_addr = std->dma_addr;
309
310 dma_unmap_single(whc->wusbhc.dev, dma_addr,
311 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
312 if (!is_out)
313 qset_copy_bounce_to_sg(whc, std);
314 kfree(std->bounce_buf);
315 }
316 if (std->pl_virt) {
317 if (std->dma_addr)
318 dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
319 std->num_pointers * sizeof(struct whc_page_list_entry),
320 DMA_TO_DEVICE);
256 kfree(std->pl_virt); 321 kfree(std->pl_virt);
322 std->pl_virt = NULL;
257 } 323 }
258
259 kfree(std); 324 kfree(std);
260} 325}
261 326
@@ -293,12 +358,17 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
293{ 358{
294 dma_addr_t dma_addr = std->dma_addr; 359 dma_addr_t dma_addr = std->dma_addr;
295 dma_addr_t sp, ep; 360 dma_addr_t sp, ep;
296 size_t std_len = std->len;
297 size_t pl_len; 361 size_t pl_len;
298 int p; 362 int p;
299 363
300 sp = ALIGN(dma_addr, WHCI_PAGE_SIZE); 364 /* Short buffers don't need a page list. */
301 ep = dma_addr + std_len; 365 if (std->len <= WHCI_PAGE_SIZE) {
366 std->num_pointers = 0;
367 return 0;
368 }
369
370 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
371 ep = dma_addr + std->len;
302 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE); 372 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
303 373
304 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry); 374 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
@@ -309,7 +379,7 @@ static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_f
309 379
310 for (p = 0; p < std->num_pointers; p++) { 380 for (p = 0; p < std->num_pointers; p++) {
311 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr); 381 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
312 dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE); 382 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
313 } 383 }
314 384
315 return 0; 385 return 0;
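
A worked example of the pointer arithmetic above, assuming a hypothetical 4 KiB WHCI_PAGE_SIZE: the page list must cover every page touched by [dma_addr, dma_addr + len), so the start is rounded down to a page boundary and the span divided with round-up.

#include <stdio.h>

#define WHCI_PAGE_SIZE 4096UL /* assumed for the example */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long dma_addr = 0x10000UL + 1000; /* 1000 bytes into a page */
	unsigned long len = 9000;
	unsigned long sp = dma_addr & ~(WHCI_PAGE_SIZE - 1);
	unsigned long ep = dma_addr + len;

	/* Touches three pages: offsets 1000..10000 span pages 0, 1 and 2 */
	printf("num_pointers = %lu\n", DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE));
	return 0;
}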
@@ -339,6 +409,218 @@ static void urb_dequeue_work(struct work_struct *work)
339 spin_unlock_irqrestore(&whc->lock, flags); 409 spin_unlock_irqrestore(&whc->lock, flags);
340} 410}
341 411
412static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
413 struct urb *urb, gfp_t mem_flags)
414{
415 struct whc_std *std;
416
417 std = kzalloc(sizeof(struct whc_std), mem_flags);
418 if (std == NULL)
419 return NULL;
420
421 std->urb = urb;
422 std->qtd = NULL;
423
424 INIT_LIST_HEAD(&std->list_node);
425 list_add_tail(&std->list_node, &qset->stds);
426
427 return std;
428}
429
430static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
431 gfp_t mem_flags)
432{
433 size_t remaining;
434 struct scatterlist *sg;
435 int i;
436 int ntds = 0;
437 struct whc_std *std = NULL;
439 dma_addr_t prev_end = 0;
440 size_t pl_len;
441 int p = 0;
442
443 remaining = urb->transfer_buffer_length;
444
445 for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
446 dma_addr_t dma_addr;
447 size_t dma_remaining;
448 dma_addr_t sp, ep;
449 int num_pointers;
450
451 if (remaining == 0) {
452 break;
453 }
454
455 dma_addr = sg_dma_address(sg);
456 dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);
457
458 while (dma_remaining) {
459 size_t dma_len;
460
461 /*
462 * We can use the previous std (if it exists) provided that:
463 * - the previous one ended on a page boundary.
464 * - the current one begins on a page boundary.
465 * - the previous one isn't full.
466 *
467 * If a new std is needed but the previous one
468 * was not a whole number of packets then this
469 * sg list cannot be mapped onto multiple
470 * qTDs. Return an error and let the caller
471 * sort it out.
472 */
473 if (!std
474 || (prev_end & (WHCI_PAGE_SIZE-1))
475 || (dma_addr & (WHCI_PAGE_SIZE-1))
476 || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
477 if (std && std->len % qset->max_packet != 0)
478 return -EINVAL;
479 std = qset_new_std(whc, qset, urb, mem_flags);
480 if (std == NULL) {
481 return -ENOMEM;
482 }
483 ntds++;
484 p = 0;
485 }
486
487 dma_len = dma_remaining;
488
489 /*
490 * If the remainder of this element doesn't
491 * fit in a single qTD, limit the qTD to a
492 * whole number of packets. This allows the
493 * remainder to go into the next qTD.
494 */
495 if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
496 dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
497 * qset->max_packet - std->len;
498 }
499
500 std->len += dma_len;
501 std->ntds_remaining = -1; /* filled in later */
502
503 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
504 ep = dma_addr + dma_len;
505 num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
506 std->num_pointers += num_pointers;
507
508 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
509
510 std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
511 if (std->pl_virt == NULL) {
512 return -ENOMEM;
513 }
514
515 for (; p < std->num_pointers; p++) {
516 std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
517 dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
518 }
519
520 prev_end = dma_addr = ep;
521 dma_remaining -= dma_len;
522 remaining -= dma_len;
523 }
524 }
525
526 /* Now the number of stds is known, go back and fill in
527 std->ntds_remaining. */
528 list_for_each_entry(std, &qset->stds, list_node) {
529 if (std->ntds_remaining == -1) {
530 pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
531 std->ntds_remaining = ntds--;
532 std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
533 pl_len, DMA_TO_DEVICE);
534 }
535 }
536 return 0;
537}
538
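
The test at the heart of qset_add_urb_sg() above can be restated as a predicate: a scatterlist chunk may extend the current sTD only if the previous chunk ended exactly on a page boundary, the new chunk starts on one, and the sTD still has room for at least one more page; otherwise a new sTD is started, which in turn is only legal if the sTD being closed holds a whole number of packets (else the function returns -EINVAL and the bounce-buffer path takes over). A condensed sketch (names local to the sketch; QTD_MAX_XFER_SIZE is a placeholder, not the driver's actual value):

#include <stdbool.h>
#include <stddef.h>

#define WHCI_PAGE_SIZE    4096UL    /* assumed */
#define QTD_MAX_XFER_SIZE 1048576UL /* placeholder */

static bool can_append_to_std(size_t std_len, unsigned long prev_end,
			      unsigned long dma_addr)
{
	return (prev_end & (WHCI_PAGE_SIZE - 1)) == 0 && /* old chunk ends on a page boundary */
	       (dma_addr & (WHCI_PAGE_SIZE - 1)) == 0 && /* new chunk starts on one */
	       std_len + WHCI_PAGE_SIZE <= QTD_MAX_XFER_SIZE; /* room for another page */
}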
539/**
540 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
541 *
542 * If the URB contains an sg list whose elements cannot be directly
543 * mapped to qTDs then the data must be transferred via bounce
544 * buffers.
545 */
546static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
547 struct urb *urb, gfp_t mem_flags)
548{
549 bool is_out = usb_pipeout(urb->pipe);
550 size_t max_std_len;
551 size_t remaining;
552 int ntds = 0;
553 struct whc_std *std = NULL;
554 void *bounce = NULL;
555 struct scatterlist *sg;
556 int i;
557
558 /* limit maximum bounce buffer to 16 * 3.5 KiB ~= 56 KiB */
559 max_std_len = qset->max_burst * qset->max_packet;
560
561 remaining = urb->transfer_buffer_length;
562
563 for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
564 size_t len;
565 size_t sg_remaining;
566 void *orig;
567
568 if (remaining == 0) {
569 break;
570 }
571
572 sg_remaining = min_t(size_t, remaining, sg->length);
573 orig = sg_virt(sg);
574
575 while (sg_remaining) {
576 if (!std || std->len == max_std_len) {
577 std = qset_new_std(whc, qset, urb, mem_flags);
578 if (std == NULL)
579 return -ENOMEM;
580 std->bounce_buf = kmalloc(max_std_len, mem_flags);
581 if (std->bounce_buf == NULL)
582 return -ENOMEM;
583 std->bounce_sg = sg;
584 std->bounce_offset = orig - sg_virt(sg);
585 bounce = std->bounce_buf;
586 ntds++;
587 }
588
589 len = min(sg_remaining, max_std_len - std->len);
590
591 if (is_out)
592 memcpy(bounce, orig, len);
593
594 std->len += len;
595 std->ntds_remaining = -1; /* filled in later */
596
597 bounce += len;
598 orig += len;
599 sg_remaining -= len;
600 remaining -= len;
601 }
602 }
603
604 /*
605 * For each of the new sTDs, map the bounce buffers, create
606 * page lists (if necessary), and fill in std->ntds_remaining.
607 */
608 list_for_each_entry(std, &qset->stds, list_node) {
609 if (std->ntds_remaining != -1)
610 continue;
611
612 std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
613 is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
614
615 if (qset_fill_page_list(whc, std, mem_flags) < 0)
616 return -ENOMEM;
617
618 std->ntds_remaining = ntds--;
619 }
620
621 return 0;
622}
623
342/** 624/**
343 * qset_add_urb - add an urb to the qset's queue. 625 * qset_add_urb - add an urb to the qset's queue.
344 * 626 *
@@ -353,10 +635,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
353 int remaining = urb->transfer_buffer_length; 635 int remaining = urb->transfer_buffer_length;
354 u64 transfer_dma = urb->transfer_dma; 636 u64 transfer_dma = urb->transfer_dma;
355 int ntds_remaining; 637 int ntds_remaining;
356 638 int ret;
357 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
358 if (ntds_remaining == 0)
359 ntds_remaining = 1;
360 639
361 wurb = kzalloc(sizeof(struct whc_urb), mem_flags); 640 wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
362 if (wurb == NULL) 641 if (wurb == NULL)
@@ -366,32 +645,39 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
366 wurb->urb = urb; 645 wurb->urb = urb;
367 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); 646 INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
368 647
648 if (urb->sg) {
649 ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
650 if (ret == -EINVAL) {
651 qset_free_stds(qset, urb);
652 ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
653 }
654 if (ret < 0)
655 goto err_no_mem;
656 return 0;
657 }
658
659 ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
660 if (ntds_remaining == 0)
661 ntds_remaining = 1;
662
369 while (ntds_remaining) { 663 while (ntds_remaining) {
370 struct whc_std *std; 664 struct whc_std *std;
371 size_t std_len; 665 size_t std_len;
372 666
373 std = kmalloc(sizeof(struct whc_std), mem_flags);
374 if (std == NULL)
375 goto err_no_mem;
376
377 std_len = remaining; 667 std_len = remaining;
378 if (std_len > QTD_MAX_XFER_SIZE) 668 if (std_len > QTD_MAX_XFER_SIZE)
379 std_len = QTD_MAX_XFER_SIZE; 669 std_len = QTD_MAX_XFER_SIZE;
380 670
381 std->urb = urb; 671 std = qset_new_std(whc, qset, urb, mem_flags);
672 if (std == NULL)
673 goto err_no_mem;
674
382 std->dma_addr = transfer_dma; 675 std->dma_addr = transfer_dma;
383 std->len = std_len; 676 std->len = std_len;
384 std->ntds_remaining = ntds_remaining; 677 std->ntds_remaining = ntds_remaining;
385 std->qtd = NULL;
386 678
387 INIT_LIST_HEAD(&std->list_node); 679 if (qset_fill_page_list(whc, std, mem_flags) < 0)
388 list_add_tail(&std->list_node, &qset->stds); 680 goto err_no_mem;
389
390 if (std_len > WHCI_PAGE_SIZE) {
391 if (qset_fill_page_list(whc, std, mem_flags) < 0)
392 goto err_no_mem;
393 } else
394 std->num_pointers = 0;
395 681
396 ntds_remaining--; 682 ntds_remaining--;
397 remaining -= std_len; 683 remaining -= std_len;
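
The submission flow that qset_add_urb() gains above reduces to: try to map the scatterlist directly onto qTDs, and if the layout cannot be expressed that way (-EINVAL), discard the partial sTDs and retry through bounce buffers, which only fails on -ENOMEM. A sketch of that shape (kernel types as in the hunk; error handling elided):

static int submit_sg_urb(struct whc *whc, struct whc_qset *qset,
			 struct urb *urb, gfp_t mem_flags)
{
	int ret;

	ret = qset_add_urb_sg(whc, qset, urb, mem_flags); /* fast path */
	if (ret == -EINVAL) {
		/* layout can't map onto qTDs: drop the partial sTDs, bounce */
		qset_free_stds(qset, urb);
		ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
	}
	return ret;
}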
diff --git a/drivers/usb/host/whci/whcd.h b/drivers/usb/host/whci/whcd.h
index 24e94d983c5e..c80c7d93bc4a 100644
--- a/drivers/usb/host/whci/whcd.h
+++ b/drivers/usb/host/whci/whcd.h
@@ -84,6 +84,11 @@ struct whc {
84 * @len: the length of data in the associated TD. 84 * @len: the length of data in the associated TD.
85 * @ntds_remaining: number of TDs (starting from this one) in this transfer. 85 * @ntds_remaining: number of TDs (starting from this one) in this transfer.
86 * 86 *
87 * @bounce_buf: a bounce buffer if the std was from an urb with a sg
88 * list that could not be mapped to qTDs directly.
89 * @bounce_sg: the first scatterlist element bounce_buf is for.
90 * @bounce_offset: the offset into bounce_sg for the start of bounce_buf.
91 *
87 * Queued URBs may require more TDs than are available in a qset so we 92 * Queued URBs may require more TDs than are available in a qset so we
88 * use a list of these "software TDs" (sTDs) to hold per-TD data. 93 * use a list of these "software TDs" (sTDs) to hold per-TD data.
89 */ 94 */
@@ -97,6 +102,10 @@ struct whc_std {
97 int num_pointers; 102 int num_pointers;
98 dma_addr_t dma_addr; 103 dma_addr_t dma_addr;
99 struct whc_page_list_entry *pl_virt; 104 struct whc_page_list_entry *pl_virt;
105
106 void *bounce_buf;
107 struct scatterlist *bounce_sg;
108 unsigned bounce_offset;
100}; 109};
101 110
102/** 111/**
diff --git a/drivers/usb/host/whci/whci-hc.h b/drivers/usb/host/whci/whci-hc.h
index e8d0001605be..4d4cbc0730bf 100644
--- a/drivers/usb/host/whci/whci-hc.h
+++ b/drivers/usb/host/whci/whci-hc.h
@@ -172,14 +172,7 @@ struct whc_qhead {
172#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */ 172#define QH_INFO3_MAX_DELAY(d) ((d) << 0) /* maximum stream delay in 125 us units (isoc only) */
173#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */ 173#define QH_INFO3_INTERVAL(i) ((i) << 16) /* segment interval in 125 us units (isoc only) */
174 174
175#define QH_INFO3_TX_RATE_53_3 (0 << 24) 175#define QH_INFO3_TX_RATE(r) ((r) << 24) /* PHY rate (see [ECMA-368] section 10.3.1.1) */
176#define QH_INFO3_TX_RATE_80 (1 << 24)
177#define QH_INFO3_TX_RATE_106_7 (2 << 24)
178#define QH_INFO3_TX_RATE_160 (3 << 24)
179#define QH_INFO3_TX_RATE_200 (4 << 24)
180#define QH_INFO3_TX_RATE_320 (5 << 24)
181#define QH_INFO3_TX_RATE_400 (6 << 24)
182#define QH_INFO3_TX_RATE_480 (7 << 24)
183#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */ 176#define QH_INFO3_TX_PWR(p) ((p) << 29) /* transmit power (see [WUSB] section 5.2.1.2) */
184 177
185#define QH_STATUS_FLOW_CTRL (1 << 15) 178#define QH_STATUS_FLOW_CTRL (1 << 15)
@@ -267,8 +260,9 @@ struct whc_qset {
267 unsigned reset:1; 260 unsigned reset:1;
268 struct urb *pause_after_urb; 261 struct urb *pause_after_urb;
269 struct completion remove_complete; 262 struct completion remove_complete;
270 int max_burst; 263 uint16_t max_packet;
271 int max_seq; 264 uint8_t max_burst;
265 uint8_t max_seq;
272}; 266};
273 267
274static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target) 268static inline void whc_qset_set_link_ptr(u64 *ptr, u64 target)
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 932f99938481..5e92c72df642 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -67,22 +67,14 @@ static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
67} 67}
68 68
69/* 69/*
70 * Force HC into halt state. 70 * Disable interrupts and begin the xHCI halting process.
71 *
72 * Disable any IRQs and clear the run/stop bit.
73 * HC will complete any current and actively pipelined transactions, and
74 * should halt within 16 microframes of the run/stop bit being cleared.
75 * Read HC Halted bit in the status register to see when the HC is finished.
76 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
77 */ 71 */
78int xhci_halt(struct xhci_hcd *xhci) 72void xhci_quiesce(struct xhci_hcd *xhci)
79{ 73{
80 u32 halted; 74 u32 halted;
81 u32 cmd; 75 u32 cmd;
82 u32 mask; 76 u32 mask;
83 77
84 xhci_dbg(xhci, "// Halt the HC\n");
85 /* Disable all interrupts from the host controller */
86 mask = ~(XHCI_IRQS); 78 mask = ~(XHCI_IRQS);
87 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT; 79 halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
88 if (!halted) 80 if (!halted)
@@ -91,6 +83,21 @@ int xhci_halt(struct xhci_hcd *xhci)
91 cmd = xhci_readl(xhci, &xhci->op_regs->command); 83 cmd = xhci_readl(xhci, &xhci->op_regs->command);
92 cmd &= mask; 84 cmd &= mask;
93 xhci_writel(xhci, cmd, &xhci->op_regs->command); 85 xhci_writel(xhci, cmd, &xhci->op_regs->command);
86}
87
88/*
89 * Force HC into halt state.
90 *
91 * Disable any IRQs and clear the run/stop bit.
92 * HC will complete any current and actively pipelined transactions, and
93 * should halt within 16 microframes of the run/stop bit being cleared.
94 * Read HC Halted bit in the status register to see when the HC is finished.
95 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
96 */
97int xhci_halt(struct xhci_hcd *xhci)
98{
99 xhci_dbg(xhci, "// Halt the HC\n");
100 xhci_quiesce(xhci);
94 101
95 return handshake(xhci, &xhci->op_regs->status, 102 return handshake(xhci, &xhci->op_regs->status,
96 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); 103 STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
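
xhci_halt() above still ends in handshake(), whose general shape is a bounded poll: read a register until the masked value matches the expected one or the timeout expires. A userspace-flavoured sketch (the driver's version also delays between reads and treats an all-ones read as a dead controller):

#include <errno.h>
#include <stdint.h>

static int handshake_poll(volatile uint32_t *reg, uint32_t mask,
			  uint32_t done, int usec)
{
	while (usec-- > 0) {
		if ((*reg & mask) == done)
			return 0;
		/* udelay(1) in the kernel; the busy-wait is elided here */
	}
	return -ETIMEDOUT;
}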
@@ -124,28 +131,6 @@ int xhci_reset(struct xhci_hcd *xhci)
124 return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); 131 return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000);
125} 132}
126 133
127/*
128 * Stop the HC from processing the endpoint queues.
129 */
130static void xhci_quiesce(struct xhci_hcd *xhci)
131{
132 /*
133 * Queues are per endpoint, so we need to disable an endpoint or slot.
134 *
135 * To disable a slot, we need to insert a disable slot command on the
136 * command ring and ring the doorbell. This will also free any internal
137 * resources associated with the slot (which might not be what we want).
138 *
139 * A Release Endpoint command sounds better - doesn't free internal HC
140 * memory, but removes the endpoints from the schedule and releases the
141 * bandwidth, disables the doorbells, and clears the endpoint enable
142 * flag. Usually used prior to a set interface command.
143 *
144 * TODO: Implement after command ring code is done.
145 */
146 BUG_ON(!HC_IS_RUNNING(xhci_to_hcd(xhci)->state));
147 xhci_dbg(xhci, "Finished quiescing -- code not written yet\n");
148}
149 134
150#if 0 135#if 0
151/* Set up MSI-X table for entry 0 (may claim other entries later) */ 136/* Set up MSI-X table for entry 0 (may claim other entries later) */
@@ -261,8 +246,14 @@ static void xhci_work(struct xhci_hcd *xhci)
261 /* Flush posted writes */ 246 /* Flush posted writes */
262 xhci_readl(xhci, &xhci->ir_set->irq_pending); 247 xhci_readl(xhci, &xhci->ir_set->irq_pending);
263 248
264 /* FIXME this should be a delayed service routine that clears the EHB */ 249 if (xhci->xhc_state & XHCI_STATE_DYING)
265 xhci_handle_event(xhci); 250 xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
251 "Shouldn't IRQs be disabled?\n");
252 else
253 /* FIXME this should be a delayed service routine
254 * that clears the EHB.
255 */
256 xhci_handle_event(xhci);
266 257
267 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */ 258 /* Clear the event handler busy flag (RW1C); the event ring should be empty. */
268 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue); 259 temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
@@ -335,7 +326,7 @@ void xhci_event_ring_work(unsigned long arg)
335 spin_lock_irqsave(&xhci->lock, flags); 326 spin_lock_irqsave(&xhci->lock, flags);
336 temp = xhci_readl(xhci, &xhci->op_regs->status); 327 temp = xhci_readl(xhci, &xhci->op_regs->status);
337 xhci_dbg(xhci, "op reg status = 0x%x\n", temp); 328 xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
338 if (temp == 0xffffffff) { 329 if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
339 xhci_dbg(xhci, "HW died, polling stopped.\n"); 330 xhci_dbg(xhci, "HW died, polling stopped.\n");
340 spin_unlock_irqrestore(&xhci->lock, flags); 331 spin_unlock_irqrestore(&xhci->lock, flags);
341 return; 332 return;
@@ -490,8 +481,6 @@ void xhci_stop(struct usb_hcd *hcd)
490 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 481 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
491 482
492 spin_lock_irq(&xhci->lock); 483 spin_lock_irq(&xhci->lock);
493 if (HC_IS_RUNNING(hcd->state))
494 xhci_quiesce(xhci);
495 xhci_halt(xhci); 484 xhci_halt(xhci);
496 xhci_reset(xhci); 485 xhci_reset(xhci);
497 spin_unlock_irq(&xhci->lock); 486 spin_unlock_irq(&xhci->lock);
@@ -727,16 +716,22 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
727 * atomic context to this function, which may allocate memory. 716 * atomic context to this function, which may allocate memory.
728 */ 717 */
729 spin_lock_irqsave(&xhci->lock, flags); 718 spin_lock_irqsave(&xhci->lock, flags);
719 if (xhci->xhc_state & XHCI_STATE_DYING)
720 goto dying;
730 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb, 721 ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
731 slot_id, ep_index); 722 slot_id, ep_index);
732 spin_unlock_irqrestore(&xhci->lock, flags); 723 spin_unlock_irqrestore(&xhci->lock, flags);
733 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) { 724 } else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
734 spin_lock_irqsave(&xhci->lock, flags); 725 spin_lock_irqsave(&xhci->lock, flags);
726 if (xhci->xhc_state & XHCI_STATE_DYING)
727 goto dying;
735 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, 728 ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
736 slot_id, ep_index); 729 slot_id, ep_index);
737 spin_unlock_irqrestore(&xhci->lock, flags); 730 spin_unlock_irqrestore(&xhci->lock, flags);
738 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) { 731 } else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
739 spin_lock_irqsave(&xhci->lock, flags); 732 spin_lock_irqsave(&xhci->lock, flags);
733 if (xhci->xhc_state & XHCI_STATE_DYING)
734 goto dying;
740 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, 735 ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
741 slot_id, ep_index); 736 slot_id, ep_index);
742 spin_unlock_irqrestore(&xhci->lock, flags); 737 spin_unlock_irqrestore(&xhci->lock, flags);
@@ -745,6 +740,12 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
745 } 740 }
746exit: 741exit:
747 return ret; 742 return ret;
743dying:
744 xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
745 "non-responsive xHCI host.\n",
746 urb->ep->desc.bEndpointAddress, urb);
747 spin_unlock_irqrestore(&xhci->lock, flags);
748 return -ESHUTDOWN;
748} 749}
749 750
750/* 751/*
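
The three hunks above add the same guard to every transfer type, and the ordering is the point: XHCI_STATE_DYING is tested under xhci->lock, so a submission cannot race with the watchdog marking the host dead between the check and the queueing. The distilled pattern (fields and flag as in the hunk):

	spin_lock_irqsave(&xhci->lock, flags);
	if (xhci->xhc_state & XHCI_STATE_DYING)
		goto dying; /* unlocks and returns -ESHUTDOWN */
	ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
	spin_unlock_irqrestore(&xhci->lock, flags);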
@@ -806,6 +807,17 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
806 kfree(td); 807 kfree(td);
807 return ret; 808 return ret;
808 } 809 }
810 if (xhci->xhc_state & XHCI_STATE_DYING) {
811 xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
812 "non-responsive xHCI host.\n",
813 urb->ep->desc.bEndpointAddress, urb);
814 /* Let the stop endpoint command watchdog timer (which set this
815 * state) finish cleaning up the endpoint TD lists. We must
816 * have caught it in the middle of dropping a lock and giving
817 * back an URB.
818 */
819 goto done;
820 }
809 821
810 xhci_dbg(xhci, "Cancel URB %p\n", urb); 822 xhci_dbg(xhci, "Cancel URB %p\n", urb);
811 xhci_dbg(xhci, "Event ring:\n"); 823 xhci_dbg(xhci, "Event ring:\n");
@@ -817,12 +829,16 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
817 xhci_debug_ring(xhci, ep_ring); 829 xhci_debug_ring(xhci, ep_ring);
818 td = (struct xhci_td *) urb->hcpriv; 830 td = (struct xhci_td *) urb->hcpriv;
819 831
820 ep->cancels_pending++;
821 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list); 832 list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
822 /* Queue a stop endpoint command, but only if this is 833 /* Queue a stop endpoint command, but only if this is
823 * the first cancellation to be handled. 834 * the first cancellation to be handled.
824 */ 835 */
825 if (ep->cancels_pending == 1) { 836 if (!(ep->ep_state & EP_HALT_PENDING)) {
837 ep->ep_state |= EP_HALT_PENDING;
838 ep->stop_cmds_pending++;
839 ep->stop_cmd_timer.expires = jiffies +
840 XHCI_STOP_EP_CMD_TIMEOUT * HZ;
841 add_timer(&ep->stop_cmd_timer);
826 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index); 842 xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
827 xhci_ring_cmd_db(xhci); 843 xhci_ring_cmd_db(xhci);
828 } 844 }
@@ -1246,13 +1262,35 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1246 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info)); 1262 LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
1247 1263
1248 xhci_zero_in_ctx(xhci, virt_dev); 1264 xhci_zero_in_ctx(xhci, virt_dev);
1249 /* Free any old rings */ 1265 /* Install new rings and free or cache any old rings */
1250 for (i = 1; i < 31; ++i) { 1266 for (i = 1; i < 31; ++i) {
1251 if (virt_dev->eps[i].new_ring) { 1267 int rings_cached;
1252 xhci_ring_free(xhci, virt_dev->eps[i].ring); 1268
1253 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; 1269 if (!virt_dev->eps[i].new_ring)
1254 virt_dev->eps[i].new_ring = NULL; 1270 continue;
1271 /* Only cache or free the old ring if it exists.
1272 * It may not if this is the first add of an endpoint.
1273 */
1274 if (virt_dev->eps[i].ring) {
1275 rings_cached = virt_dev->num_rings_cached;
1276 if (rings_cached < XHCI_MAX_RINGS_CACHED) {
1277 virt_dev->num_rings_cached++;
1278 rings_cached = virt_dev->num_rings_cached;
1279 virt_dev->ring_cache[rings_cached] =
1280 virt_dev->eps[i].ring;
1281 xhci_dbg(xhci, "Cached old ring, "
1282 "%d ring%s cached\n",
1283 rings_cached,
1284 (rings_cached > 1) ? "s" : "");
1285 } else {
1286 xhci_ring_free(xhci, virt_dev->eps[i].ring);
1287 xhci_dbg(xhci, "Ring cache full (%d rings), "
1288 "freeing ring\n",
1289 virt_dev->num_rings_cached);
1290 }
1255 } 1291 }
1292 virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
1293 virt_dev->eps[i].new_ring = NULL;
1256 } 1294 }
1257 1295
1258 return ret; 1296 return ret;
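
The ring cache introduced above parks up to XHCI_MAX_RINGS_CACHED retired rings per device instead of freeing them, so a later xhci_endpoint_init() can reuse one rather than allocate. Condensed to zero-based indexing (the patch itself keeps the cache 1-based), the discipline is:

	/* cache (xhci_check_bandwidth): */
	if (virt_dev->num_rings_cached < XHCI_MAX_RINGS_CACHED)
		virt_dev->ring_cache[virt_dev->num_rings_cached++] = old_ring;
	else
		xhci_ring_free(xhci, old_ring);

	/* reuse (xhci_endpoint_init): */
	if (!new_ring && virt_dev->num_rings_cached > 0) {
		new_ring = virt_dev->ring_cache[--virt_dev->num_rings_cached];
		xhci_reinit_cached_ring(xhci, new_ring);
	}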
@@ -1427,16 +1465,27 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
1427void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) 1465void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
1428{ 1466{
1429 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 1467 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
1468 struct xhci_virt_device *virt_dev;
1430 unsigned long flags; 1469 unsigned long flags;
1431 u32 state; 1470 u32 state;
1471 int i;
1432 1472
1433 if (udev->slot_id == 0) 1473 if (udev->slot_id == 0)
1434 return; 1474 return;
1475 virt_dev = xhci->devs[udev->slot_id];
1476 if (!virt_dev)
1477 return;
1478
1479 /* Stop any wayward timer functions (which may grab the lock) */
1480 for (i = 0; i < 31; ++i) {
1481 virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
1482 del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
1483 }
1435 1484
1436 spin_lock_irqsave(&xhci->lock, flags); 1485 spin_lock_irqsave(&xhci->lock, flags);
1437 /* Don't disable the slot if the host controller is dead. */ 1486 /* Don't disable the slot if the host controller is dead. */
1438 state = xhci_readl(xhci, &xhci->op_regs->status); 1487 state = xhci_readl(xhci, &xhci->op_regs->status);
1439 if (state == 0xffffffff) { 1488 if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
1440 xhci_free_virt_device(xhci, udev->slot_id); 1489 xhci_free_virt_device(xhci, udev->slot_id);
1441 spin_unlock_irqrestore(&xhci->lock, flags); 1490 spin_unlock_irqrestore(&xhci->lock, flags);
1442 return; 1491 return;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index b8fd270a8b0d..bffcef7a5545 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -125,6 +125,23 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
125 kfree(ring); 125 kfree(ring);
126} 126}
127 127
128static void xhci_initialize_ring_info(struct xhci_ring *ring)
129{
130 /* The ring is empty, so the enqueue pointer == dequeue pointer */
131 ring->enqueue = ring->first_seg->trbs;
132 ring->enq_seg = ring->first_seg;
133 ring->dequeue = ring->enqueue;
134 ring->deq_seg = ring->first_seg;
135 /* The ring is initialized to 0. The producer must write 1 to the cycle
136 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
137 * compare CCS to the cycle bit to check ownership, so CCS = 1.
138 */
139 ring->cycle_state = 1;
140 /* Not necessary for new rings, but needed for re-initialized rings */
141 ring->enq_updates = 0;
142 ring->deq_updates = 0;
143}
144
128/** 145/**
129 * Create a new ring with zero or more segments. 146 * Create a new ring with zero or more segments.
130 * 147 *
@@ -173,17 +190,7 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
173 " segment %p (virtual), 0x%llx (DMA)\n", 190 " segment %p (virtual), 0x%llx (DMA)\n",
174 prev, (unsigned long long)prev->dma); 191 prev, (unsigned long long)prev->dma);
175 } 192 }
176 /* The ring is empty, so the enqueue pointer == dequeue pointer */ 193 xhci_initialize_ring_info(ring);
177 ring->enqueue = ring->first_seg->trbs;
178 ring->enq_seg = ring->first_seg;
179 ring->dequeue = ring->enqueue;
180 ring->deq_seg = ring->first_seg;
181 /* The ring is initialized to 0. The producer must write 1 to the cycle
182 * bit to handover ownership of the TRB, so PCS = 1. The consumer must
183 * compare CCS to the cycle bit to check ownership, so CCS = 1.
184 */
185 ring->cycle_state = 1;
186
187 return ring; 194 return ring;
188 195
189fail: 196fail:
@@ -191,6 +198,27 @@ fail:
191 return 0; 198 return 0;
192} 199}
193 200
201/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
202 * pointers to the beginning of the ring.
203 */
204static void xhci_reinit_cached_ring(struct xhci_hcd *xhci,
205 struct xhci_ring *ring)
206{
207 struct xhci_segment *seg = ring->first_seg;
208 do {
209 memset(seg->trbs, 0,
210 sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
211 /* All endpoint rings have link TRBs */
212 xhci_link_segments(xhci, seg, seg->next, 1);
213 seg = seg->next;
214 } while (seg != ring->first_seg);
215 xhci_initialize_ring_info(ring);
216 /* td list should be empty since all URBs have been cancelled,
217 * but just in case...
218 */
219 INIT_LIST_HEAD(&ring->td_list);
220}
221
194#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) 222#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
195 223
196struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 224struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
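
The PCS/CCS comment moved into xhci_initialize_ring_info() above is also the reason cached rings must be zeroed before reuse: with the ring memory all zero and cycle_state starting at 1, a TRB only looks ready to the consumer after the producer has written it with the cycle bit set. A standalone illustration:

#include <stdbool.h>
#include <stdint.h>

#define TRB_CYCLE 0x1u

static bool trb_owned_by_consumer(uint32_t trb_control,
				  unsigned int consumer_cycle_state)
{
	/* A freshly zeroed TRB has cycle bit 0; with CCS = 1 it is not
	 * consumable until the producer stores it with cycle bit 1.
	 */
	return (trb_control & TRB_CYCLE) == consumer_cycle_state;
}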
@@ -248,6 +276,15 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
248 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); 276 (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params)));
249} 277}
250 278
279static void xhci_init_endpoint_timer(struct xhci_hcd *xhci,
280 struct xhci_virt_ep *ep)
281{
282 init_timer(&ep->stop_cmd_timer);
283 ep->stop_cmd_timer.data = (unsigned long) ep;
284 ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog;
285 ep->xhci = xhci;
286}
287
251/* All the xhci_tds in the ring's TD list should be freed at this point */ 288/* All the xhci_tds in the ring's TD list should be freed at this point */
252void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) 289void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
253{ 290{
@@ -267,6 +304,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
267 if (dev->eps[i].ring) 304 if (dev->eps[i].ring)
268 xhci_ring_free(xhci, dev->eps[i].ring); 305 xhci_ring_free(xhci, dev->eps[i].ring);
269 306
307 if (dev->ring_cache) {
308 for (i = 0; i < dev->num_rings_cached; i++)
309 xhci_ring_free(xhci, dev->ring_cache[i]);
310 kfree(dev->ring_cache);
311 }
312
270 if (dev->in_ctx) 313 if (dev->in_ctx)
271 xhci_free_container_ctx(xhci, dev->in_ctx); 314 xhci_free_container_ctx(xhci, dev->in_ctx);
272 if (dev->out_ctx) 315 if (dev->out_ctx)
@@ -309,15 +352,25 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
309 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, 352 xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
310 (unsigned long long)dev->in_ctx->dma); 353 (unsigned long long)dev->in_ctx->dma);
311 354
312 /* Initialize the cancellation list for each endpoint */ 355 /* Initialize the cancellation list and watchdog timers for each ep */
313 for (i = 0; i < 31; i++) 356 for (i = 0; i < 31; i++) {
357 xhci_init_endpoint_timer(xhci, &dev->eps[i]);
314 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); 358 INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
359 }
315 360
316 /* Allocate endpoint 0 ring */ 361 /* Allocate endpoint 0 ring */
317 dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); 362 dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
318 if (!dev->eps[0].ring) 363 if (!dev->eps[0].ring)
319 goto fail; 364 goto fail;
320 365
366 /* Allocate pointers to the ring cache */
367 dev->ring_cache = kzalloc(
368 sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED,
369 flags);
370 if (!dev->ring_cache)
371 goto fail;
372 dev->num_rings_cached = 0;
373
321 init_completion(&dev->cmd_completion); 374 init_completion(&dev->cmd_completion);
322 INIT_LIST_HEAD(&dev->cmd_list); 375 INIT_LIST_HEAD(&dev->cmd_list);
323 376
@@ -544,8 +597,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
544 /* Set up the endpoint ring */ 597 /* Set up the endpoint ring */
545 virt_dev->eps[ep_index].new_ring = 598 virt_dev->eps[ep_index].new_ring =
546 xhci_ring_alloc(xhci, 1, true, mem_flags); 599 xhci_ring_alloc(xhci, 1, true, mem_flags);
547 if (!virt_dev->eps[ep_index].new_ring) 600 if (!virt_dev->eps[ep_index].new_ring) {
548 return -ENOMEM; 601 /* Attempt to use the ring cache */
602 if (virt_dev->num_rings_cached == 0)
603 return -ENOMEM;
604 virt_dev->eps[ep_index].new_ring =
605 virt_dev->ring_cache[virt_dev->num_rings_cached];
606 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
607 virt_dev->num_rings_cached--;
608 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring);
609 }
549 ep_ring = virt_dev->eps[ep_index].new_ring; 610 ep_ring = virt_dev->eps[ep_index].new_ring;
550 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; 611 ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
551 612
@@ -768,14 +829,17 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
768 829
769 command->in_ctx = 830 command->in_ctx =
770 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); 831 xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
771 if (!command->in_ctx) 832 if (!command->in_ctx) {
833 kfree(command);
772 return NULL; 834 return NULL;
835 }
773 836
774 if (allocate_completion) { 837 if (allocate_completion) {
775 command->completion = 838 command->completion =
776 kzalloc(sizeof(struct completion), mem_flags); 839 kzalloc(sizeof(struct completion), mem_flags);
777 if (!command->completion) { 840 if (!command->completion) {
778 xhci_free_container_ctx(xhci, command->in_ctx); 841 xhci_free_container_ctx(xhci, command->in_ctx);
842 kfree(command);
779 return NULL; 843 return NULL;
780 } 844 }
781 init_completion(command->completion); 845 init_completion(command->completion);
@@ -848,6 +912,163 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
848 xhci->page_shift = 0; 912 xhci->page_shift = 0;
849} 913}
850 914
915static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
916 struct xhci_segment *input_seg,
917 union xhci_trb *start_trb,
918 union xhci_trb *end_trb,
919 dma_addr_t input_dma,
920 struct xhci_segment *result_seg,
921 char *test_name, int test_number)
922{
923 unsigned long long start_dma;
924 unsigned long long end_dma;
925 struct xhci_segment *seg;
926
927 start_dma = xhci_trb_virt_to_dma(input_seg, start_trb);
928 end_dma = xhci_trb_virt_to_dma(input_seg, end_trb);
929
930 seg = trb_in_td(input_seg, start_trb, end_trb, input_dma);
931 if (seg != result_seg) {
932 xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n",
933 test_name, test_number);
934 xhci_warn(xhci, "Tested TRB math w/ seg %p and "
935 "input DMA 0x%llx\n",
936 input_seg,
937 (unsigned long long) input_dma);
938 xhci_warn(xhci, "starting TRB %p (0x%llx DMA), "
939 "ending TRB %p (0x%llx DMA)\n",
940 start_trb, start_dma,
941 end_trb, end_dma);
942 xhci_warn(xhci, "Expected seg %p, got seg %p\n",
943 result_seg, seg);
944 return -1;
945 }
946 return 0;
947}
948
949/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
950static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags)
951{
952 struct {
953 dma_addr_t input_dma;
954 struct xhci_segment *result_seg;
955 } simple_test_vector[] = {
956 /* A zeroed DMA field should fail */
957 { 0, NULL },
958 /* One TRB before the ring start should fail */
959 { xhci->event_ring->first_seg->dma - 16, NULL },
960 /* One byte before the ring start should fail */
961 { xhci->event_ring->first_seg->dma - 1, NULL },
962 /* Starting TRB should succeed */
963 { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg },
964 /* Ending TRB should succeed */
965 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16,
966 xhci->event_ring->first_seg },
967 /* One byte after the ring end should fail */
968 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL },
969 /* One TRB after the ring end should fail */
970 { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL },
971 /* An address of all ones should fail */
972 { (dma_addr_t) (~0), NULL },
973 };
974 struct {
975 struct xhci_segment *input_seg;
976 union xhci_trb *start_trb;
977 union xhci_trb *end_trb;
978 dma_addr_t input_dma;
979 struct xhci_segment *result_seg;
980 } complex_test_vector[] = {
981 /* Test feeding a valid DMA address from a different ring */
982 { .input_seg = xhci->event_ring->first_seg,
983 .start_trb = xhci->event_ring->first_seg->trbs,
984 .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
985 .input_dma = xhci->cmd_ring->first_seg->dma,
986 .result_seg = NULL,
987 },
988 /* Test feeding a valid end TRB from a different ring */
989 { .input_seg = xhci->event_ring->first_seg,
990 .start_trb = xhci->event_ring->first_seg->trbs,
991 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
992 .input_dma = xhci->cmd_ring->first_seg->dma,
993 .result_seg = NULL,
994 },
995 /* Test feeding a valid start and end TRB from a different ring */
996 { .input_seg = xhci->event_ring->first_seg,
997 .start_trb = xhci->cmd_ring->first_seg->trbs,
998 .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
999 .input_dma = xhci->cmd_ring->first_seg->dma,
1000 .result_seg = NULL,
1001 },
1002 /* TRB in this ring, but after this TD */
1003 { .input_seg = xhci->event_ring->first_seg,
1004 .start_trb = &xhci->event_ring->first_seg->trbs[0],
1005 .end_trb = &xhci->event_ring->first_seg->trbs[3],
1006 .input_dma = xhci->event_ring->first_seg->dma + 4*16,
1007 .result_seg = NULL,
1008 },
1009 /* TRB in this ring, but before this TD */
1010 { .input_seg = xhci->event_ring->first_seg,
1011 .start_trb = &xhci->event_ring->first_seg->trbs[3],
1012 .end_trb = &xhci->event_ring->first_seg->trbs[6],
1013 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1014 .result_seg = NULL,
1015 },
1016 /* TRB in this ring, but after this wrapped TD */
1017 { .input_seg = xhci->event_ring->first_seg,
1018 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1019 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1020 .input_dma = xhci->event_ring->first_seg->dma + 2*16,
1021 .result_seg = NULL,
1022 },
1023 /* TRB in this ring, but before this wrapped TD */
1024 { .input_seg = xhci->event_ring->first_seg,
1025 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1026 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1027 .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16,
1028 .result_seg = NULL,
1029 },
1030 /* TRB not in this ring, and we have a wrapped TD */
1031 { .input_seg = xhci->event_ring->first_seg,
1032 .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3],
1033 .end_trb = &xhci->event_ring->first_seg->trbs[1],
1034 .input_dma = xhci->cmd_ring->first_seg->dma + 2*16,
1035 .result_seg = NULL,
1036 },
1037 };
1038
1039 unsigned int num_tests;
1040 int i, ret;
1041
1042 num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]);
1043 for (i = 0; i < num_tests; i++) {
1044 ret = xhci_test_trb_in_td(xhci,
1045 xhci->event_ring->first_seg,
1046 xhci->event_ring->first_seg->trbs,
1047 &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1],
1048 simple_test_vector[i].input_dma,
1049 simple_test_vector[i].result_seg,
1050 "Simple", i);
1051 if (ret < 0)
1052 return ret;
1053 }
1054
1055 num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]);
1056 for (i = 0; i < num_tests; i++) {
1057 ret = xhci_test_trb_in_td(xhci,
1058 complex_test_vector[i].input_seg,
1059 complex_test_vector[i].start_trb,
1060 complex_test_vector[i].end_trb,
1061 complex_test_vector[i].input_dma,
1062 complex_test_vector[i].result_seg,
1063 "Complex", i);
1064 if (ret < 0)
1065 return ret;
1066 }
1067 xhci_dbg(xhci, "TRB math tests passed.\n");
1068 return 0;
1069}
1070
1071
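
What the self-test above exercises, in its simplest case: trb_in_td() decides whether a DMA address reported by the controller falls inside a TD's TRB range. For a TD confined to one segment that is a plain interval check; the real function also follows segment links and handles TDs that wrap past the end of a segment, which is what the "wrapped TD" vectors probe. A sketch of the non-wrapping case only:

#include <stdbool.h>

typedef unsigned long long dma_addr_t;

static bool dma_in_td_single_seg(dma_addr_t start_dma, dma_addr_t end_dma,
				 dma_addr_t suspect_dma)
{
	/* start/end are the DMA addresses of the TD's first and last TRB */
	return suspect_dma >= start_dma && suspect_dma <= end_dma;
}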
851int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) 1072int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
852{ 1073{
853 dma_addr_t dma; 1074 dma_addr_t dma;
@@ -951,6 +1172,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
951 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); 1172 xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags);
952 if (!xhci->event_ring) 1173 if (!xhci->event_ring)
953 goto fail; 1174 goto fail;
1175 if (xhci_check_trb_in_td_math(xhci, flags) < 0)
1176 goto fail;
954 1177
955 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), 1178 xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev),
956 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); 1179 sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 06595ec27bb7..e097008d6fb1 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,6 +54,8 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
54 struct pci_dev *pdev = to_pci_dev(hcd->self.controller); 54 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
55 int retval; 55 int retval;
56 56
57 hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1;
58
57 xhci->cap_regs = hcd->regs; 59 xhci->cap_regs = hcd->regs;
58 xhci->op_regs = hcd->regs + 60 xhci->op_regs = hcd->regs +
59 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase)); 61 HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 821b7b4709de..ee7bc7ecbc59 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -306,7 +306,7 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
306 /* Don't ring the doorbell for this endpoint if there are pending 306 /* Don't ring the doorbell for this endpoint if there are pending
307 * cancellations because we don't want to interrupt processing. 307 * cancellations because we don't want to interrupt processing.
308 */ 308 */
309 if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING) 309 if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
310 && !(ep_state & EP_HALTED)) { 310 && !(ep_state & EP_HALTED)) {
311 field = xhci_readl(xhci, db_addr) & DB_MASK; 311 field = xhci_readl(xhci, db_addr) & DB_MASK;
312 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr); 312 xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
@@ -475,6 +475,35 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
475 ep->ep_state |= SET_DEQ_PENDING; 475 ep->ep_state |= SET_DEQ_PENDING;
476} 476}
477 477
478static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
479 struct xhci_virt_ep *ep)
480{
481 ep->ep_state &= ~EP_HALT_PENDING;
482 /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
483 * timer is running on another CPU, we don't decrement stop_cmds_pending
484 * (since we didn't successfully stop the watchdog timer).
485 */
486 if (del_timer(&ep->stop_cmd_timer))
487 ep->stop_cmds_pending--;
488}
489
490/* Must be called with xhci->lock held in interrupt context */
491static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
492 struct xhci_td *cur_td, int status, char *adjective)
493{
494 struct usb_hcd *hcd = xhci_to_hcd(xhci);
495
496 cur_td->urb->hcpriv = NULL;
497 usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
498 xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);
499
500 spin_unlock(&xhci->lock);
501 usb_hcd_giveback_urb(hcd, cur_td->urb, status);
502 kfree(cur_td);
503 spin_lock(&xhci->lock);
504 xhci_dbg(xhci, "%s URB given back\n", adjective);
505}
506
478/* 507/*
479 * When we get a command completion for a Stop Endpoint Command, we need to 508 * When we get a command completion for a Stop Endpoint Command, we need to
480 * unlink any cancelled TDs from the ring. There are two ways to do that: 509 * unlink any cancelled TDs from the ring. There are two ways to do that:
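
The helper xhci_giveback_urb_in_irq() added above centralizes a locking dance: usb_hcd_giveback_urb() runs the URB's completion handler, which may resubmit URBs and re-enter the driver, so xhci->lock must be dropped around the call and re-acquired before the ring state is touched again. The core of it:

	spin_unlock(&xhci->lock); /* completion handler may take the lock */
	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
	spin_lock(&xhci->lock);   /* re-acquire before touching ring state */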
@@ -497,9 +526,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
497 struct xhci_td *last_unlinked_td; 526 struct xhci_td *last_unlinked_td;
498 527
499 struct xhci_dequeue_state deq_state; 528 struct xhci_dequeue_state deq_state;
500#ifdef CONFIG_USB_HCD_STAT
501 ktime_t stop_time = ktime_get();
502#endif
503 529
504 memset(&deq_state, 0, sizeof(deq_state)); 530 memset(&deq_state, 0, sizeof(deq_state));
505 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]); 531 slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
@@ -507,8 +533,11 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
507 ep = &xhci->devs[slot_id]->eps[ep_index]; 533 ep = &xhci->devs[slot_id]->eps[ep_index];
508 ep_ring = ep->ring; 534 ep_ring = ep->ring;
509 535
510 if (list_empty(&ep->cancelled_td_list)) 536 if (list_empty(&ep->cancelled_td_list)) {
537 xhci_stop_watchdog_timer_in_irq(xhci, ep);
538 ring_ep_doorbell(xhci, slot_id, ep_index);
511 return; 539 return;
540 }
512 541
513 /* Fix up the ep ring first, so HW stops executing cancelled TDs. 542 /* Fix up the ep ring first, so HW stops executing cancelled TDs.
514 * We have the xHCI lock, so nothing can modify this list until we drop 543 * We have the xHCI lock, so nothing can modify this list until we drop
@@ -535,9 +564,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
535 * the cancelled TD list for URB completion later. 564 * the cancelled TD list for URB completion later.
536 */ 565 */
537 list_del(&cur_td->td_list); 566 list_del(&cur_td->td_list);
538 ep->cancels_pending--;
539 } 567 }
540 last_unlinked_td = cur_td; 568 last_unlinked_td = cur_td;
569 xhci_stop_watchdog_timer_in_irq(xhci, ep);
541 570
542 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */ 571 /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
543 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) { 572 if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
@@ -561,27 +590,136 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
561 list_del(&cur_td->cancelled_td_list); 590 list_del(&cur_td->cancelled_td_list);
562 591
563 /* Clean up the cancelled URB */ 592 /* Clean up the cancelled URB */
564#ifdef CONFIG_USB_HCD_STAT
565 hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
566 ktime_sub(stop_time, cur_td->start_time));
567#endif
568 cur_td->urb->hcpriv = NULL;
569 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
570
571 xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
572 spin_unlock(&xhci->lock);
573 /* Doesn't matter what we pass for status, since the core will 593 /* Doesn't matter what we pass for status, since the core will
574 * just overwrite it (because the URB has been unlinked). 594 * just overwrite it (because the URB has been unlinked).
575 */ 595 */
576 usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0); 596 xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
577 kfree(cur_td);
578 597
579 spin_lock(&xhci->lock); 598 /* Stop processing the cancelled list if the watchdog timer is
599 * running.
600 */
601 if (xhci->xhc_state & XHCI_STATE_DYING)
602 return;
580 } while (cur_td != last_unlinked_td); 603 } while (cur_td != last_unlinked_td);
581 604
582 /* Return to the event handler with xhci->lock re-acquired */ 605 /* Return to the event handler with xhci->lock re-acquired */
583} 606}
584 607
608/* Watchdog timer function for when a stop endpoint command fails to complete.
609 * In this case, we assume the host controller is broken or dying or dead. The
610 * host may still be completing some other events, so we have to be careful to
611 * let the event ring handler and the URB dequeueing/enqueueing functions know
612 * through xhci->state.
613 *
614 * The timer may also fire if the host takes a very long time to respond to the
615 * command, and the stop endpoint command completion handler cannot delete the
616 * timer before the timer function is called. Another endpoint cancellation may
617 * sneak in before the timer function can grab the lock, and that may queue
618 * another stop endpoint command and add the timer back. So we cannot use a
619 * simple flag to say whether there is a pending stop endpoint command for a
620 * particular endpoint.
621 *
622 * Instead we use a combination of that flag and a counter for the number of
623 * pending stop endpoint commands. If the timer is the tail end of the last
624 * stop endpoint command, and the endpoint's command is still pending, we assume
625 * the host is dying.
626 */
627void xhci_stop_endpoint_command_watchdog(unsigned long arg)
628{
629 struct xhci_hcd *xhci;
630 struct xhci_virt_ep *ep;
631 struct xhci_virt_ep *temp_ep;
632 struct xhci_ring *ring;
633 struct xhci_td *cur_td;
634 int ret, i, j;
635
636 ep = (struct xhci_virt_ep *) arg;
637 xhci = ep->xhci;
638
639 spin_lock(&xhci->lock);
640
641 ep->stop_cmds_pending--;
642 if (xhci->xhc_state & XHCI_STATE_DYING) {
643 xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
644 "xHCI as DYING, exiting.\n");
645 spin_unlock(&xhci->lock);
646 return;
647 }
648 if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
649 xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
650 "exiting.\n");
651 spin_unlock(&xhci->lock);
652 return;
653 }
654
655 xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
656 xhci_warn(xhci, "Assuming host is dying, halting host.\n");
657 /* Oops, HC is dead or dying or at least not responding to the stop
658 * endpoint command.
659 */
660 xhci->xhc_state |= XHCI_STATE_DYING;
661 /* Disable interrupts from the host controller and start halting it */
662 xhci_quiesce(xhci);
663 spin_unlock(&xhci->lock);
664
665 ret = xhci_halt(xhci);
666
667 spin_lock(&xhci->lock);
668 if (ret < 0) {
669 /* This is bad; the host is not responding to commands and it's
670 * not allowing itself to be halted. At least interrupts are
671 * disabled, so we can set HC_STATE_HALT and notify the
672 * USB core. But if we call usb_hc_died(), it will attempt to
673 * disconnect all device drivers under this host. Those
674 * disconnect() methods will wait for all URBs to be unlinked,
675 * so we must complete them.
676 */
677 xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
678 xhci_warn(xhci, "Completing active URBs anyway.\n");
679 /* We could turn all TDs on the rings to no-ops. This won't
680 * help if the host has cached part of the ring, and is slow if
681 * we want to preserve the cycle bit. Skip it and hope the host
682 * doesn't touch the memory.
683 */
684 }
685 for (i = 0; i < MAX_HC_SLOTS; i++) {
686 if (!xhci->devs[i])
687 continue;
688 for (j = 0; j < 31; j++) {
689 temp_ep = &xhci->devs[i]->eps[j];
690 ring = temp_ep->ring;
691 if (!ring)
692 continue;
693 xhci_dbg(xhci, "Killing URBs for slot ID %u, "
694 "ep index %u\n", i, j);
695 while (!list_empty(&ring->td_list)) {
696 cur_td = list_first_entry(&ring->td_list,
697 struct xhci_td,
698 td_list);
699 list_del(&cur_td->td_list);
700 if (!list_empty(&cur_td->cancelled_td_list))
701 list_del(&cur_td->cancelled_td_list);
702 xhci_giveback_urb_in_irq(xhci, cur_td,
703 -ESHUTDOWN, "killed");
704 }
705 while (!list_empty(&temp_ep->cancelled_td_list)) {
706 cur_td = list_first_entry(
707 &temp_ep->cancelled_td_list,
708 struct xhci_td,
709 cancelled_td_list);
710 list_del(&cur_td->cancelled_td_list);
711 xhci_giveback_urb_in_irq(xhci, cur_td,
712 -ESHUTDOWN, "killed");
713 }
714 }
715 }
716 spin_unlock(&xhci->lock);
717 xhci_to_hcd(xhci)->state = HC_STATE_HALT;
718 xhci_dbg(xhci, "Calling usb_hc_died()\n");
719 usb_hc_died(xhci_to_hcd(xhci));
720 xhci_dbg(xhci, "xHCI host controller is dead.\n");
721}
722
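The disarm above pairs with an arming site in the URB-dequeue path that this hunk does not show. Reconstructed from the comment (an assumption, not quoted from the patch), each arm looks roughly like:

	/* Sketch: arm the watchdog before queueing a Stop Endpoint
	 * command.  stop_cmds_pending counts arms; a successful
	 * del_timer() in xhci_stop_watchdog_timer_in_irq() counts
	 * the matching disarm.
	 */
	ep->ep_state |= EP_HALT_PENDING;
	ep->stop_cmds_pending++;
	ep->stop_cmd_timer.expires = jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ;
	add_timer(&ep->stop_cmd_timer);
	xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
	xhci_ring_cmd_db(xhci);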
585/* 723/*
586 * When we get a completion for a Set Transfer Ring Dequeue Pointer command, 724 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
587 * we need to clear the set deq pending flag in the endpoint ring state, so that 725 * we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -765,28 +903,32 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
765 virt_dev->in_ctx); 903 virt_dev->in_ctx);
766 /* Input ctx add_flags are the endpoint index plus one */ 904 /* Input ctx add_flags are the endpoint index plus one */
767 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1; 905 ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
768 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; 906 /* A usb_set_interface() call directly after clearing a halted
769 if (!ep_ring) { 907 * condition may race on this quirky hardware.
770 /* This must have been an initial configure endpoint */ 908 * Not worth worrying about, since this is prototype hardware.
771 xhci->devs[slot_id]->cmd_status = 909 */
772 GET_COMP_CODE(event->status);
773 complete(&xhci->devs[slot_id]->cmd_completion);
774 break;
775 }
776 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
777 xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
778 "state = %d\n", ep_index, ep_state);
779 if (xhci->quirks & XHCI_RESET_EP_QUIRK && 910 if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
780 ep_state & EP_HALTED) { 911 ep_index != (unsigned int) -1 &&
912 ctrl_ctx->add_flags - SLOT_FLAG ==
913 ctrl_ctx->drop_flags) {
914 ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
915 ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
916 if (!(ep_state & EP_HALTED))
917 goto bandwidth_change;
918 xhci_dbg(xhci, "Completed config ep cmd - "
919 "last ep index = %d, state = %d\n",
920 ep_index, ep_state);
781 /* Clear our internal halted state and restart ring */ 921 /* Clear our internal halted state and restart ring */
782 xhci->devs[slot_id]->eps[ep_index].ep_state &= 922 xhci->devs[slot_id]->eps[ep_index].ep_state &=
783 ~EP_HALTED; 923 ~EP_HALTED;
784 ring_ep_doorbell(xhci, slot_id, ep_index); 924 ring_ep_doorbell(xhci, slot_id, ep_index);
785 } else { 925 break;
786 xhci->devs[slot_id]->cmd_status =
787 GET_COMP_CODE(event->status);
788 complete(&xhci->devs[slot_id]->cmd_completion);
789 } 926 }
927bandwidth_change:
928 xhci_dbg(xhci, "Completed config ep cmd\n");
929 xhci->devs[slot_id]->cmd_status =
930 GET_COMP_CODE(event->status);
931 complete(&xhci->devs[slot_id]->cmd_completion);
790 break; 932 break;
791 case TRB_TYPE(TRB_EVAL_CONTEXT): 933 case TRB_TYPE(TRB_EVAL_CONTEXT):
792 virt_dev = xhci->devs[slot_id]; 934 virt_dev = xhci->devs[slot_id];
@@ -849,8 +991,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
849 * TRB in this TD, this function returns that TRB's segment. Otherwise it 991 * TRB in this TD, this function returns that TRB's segment. Otherwise it
850 * returns 0. 992 * returns 0.
851 */ 993 */
852static struct xhci_segment *trb_in_td( 994struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
853 struct xhci_segment *start_seg,
854 union xhci_trb *start_trb, 995 union xhci_trb *start_trb,
855 union xhci_trb *end_trb, 996 union xhci_trb *end_trb,
856 dma_addr_t suspect_dma) 997 dma_addr_t suspect_dma)
@@ -900,6 +1041,45 @@ static struct xhci_segment *trb_in_td(
900 return 0; 1041 return 0;
901} 1042}
902 1043
1044static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
1045 unsigned int slot_id, unsigned int ep_index,
1046 struct xhci_td *td, union xhci_trb *event_trb)
1047{
1048 struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
1049 ep->ep_state |= EP_HALTED;
1050 ep->stopped_td = td;
1051 ep->stopped_trb = event_trb;
1052 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1053 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1054 xhci_ring_cmd_db(xhci);
1055}
1056
1057/* Check if an error has halted the endpoint ring. The class driver will
 1058 * clean up the halt for a non-default control endpoint if we indicate a stall.
 1059 * However, babble and other errors also halt the endpoint ring, and the class
1060 * driver won't clear the halt in that case, so we need to issue a Set Transfer
1061 * Ring Dequeue Pointer command manually.
1062 */
1063static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
1064 struct xhci_ep_ctx *ep_ctx,
1065 unsigned int trb_comp_code)
1066{
1067 /* TRB completion codes that may require a manual halt cleanup */
1068 if (trb_comp_code == COMP_TX_ERR ||
1069 trb_comp_code == COMP_BABBLE ||
1070 trb_comp_code == COMP_SPLIT_ERR)
 1071 /* The 0.95 spec says a babbling control endpoint
1072 * is not halted. The 0.96 spec says it is. Some HW
1073 * claims to be 0.95 compliant, but it halts the control
1074 * endpoint anyway. Check if a babble halted the
1075 * endpoint.
1076 */
1077 if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
1078 return 1;
1079
1080 return 0;
1081}
1082
903/* 1083/*
904 * If this function returns an error condition, it means it got a Transfer 1084 * If this function returns an error condition, it means it got a Transfer
905 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address. 1085 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1002,6 +1182,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1002 xhci_warn(xhci, "WARN: TRB error on endpoint\n"); 1182 xhci_warn(xhci, "WARN: TRB error on endpoint\n");
1003 status = -EILSEQ; 1183 status = -EILSEQ;
1004 break; 1184 break;
1185 case COMP_SPLIT_ERR:
1005 case COMP_TX_ERR: 1186 case COMP_TX_ERR:
1006 xhci_warn(xhci, "WARN: transfer error on endpoint\n"); 1187 xhci_warn(xhci, "WARN: transfer error on endpoint\n");
1007 status = -EPROTO; 1188 status = -EPROTO;
@@ -1015,6 +1196,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1015 status = -ENOSR; 1196 status = -ENOSR;
1016 break; 1197 break;
1017 default: 1198 default:
1199 if (trb_comp_code >= 224 && trb_comp_code <= 255) {
1200 /* Vendor defined "informational" completion code,
1201 * treat as not-an-error.
1202 */
1203 xhci_dbg(xhci, "Vendor defined info completion code %u\n",
1204 trb_comp_code);
1205 xhci_dbg(xhci, "Treating code as success.\n");
1206 status = 0;
1207 break;
1208 }
1018 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n"); 1209 xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
1019 urb = NULL; 1210 urb = NULL;
1020 goto cleanup; 1211 goto cleanup;
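Completion codes 224-255 are the xHCI spec's vendor-defined range, which is why the default arm treats them as informational rather than fatal. If more call sites appear, the range check could move into a helper along these lines (a sketch, not part of this patch):

	/* Vendor-defined completion codes (224..255) are
	 * informational per the xHCI specification, not errors.
	 */
	static inline bool xhci_is_vendor_info_code(unsigned int trb_comp_code)
	{
		return trb_comp_code >= 224 && trb_comp_code <= 255;
	}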
@@ -1043,15 +1234,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1043 else 1234 else
1044 status = 0; 1235 status = 0;
1045 break; 1236 break;
1046 case COMP_BABBLE: 1237
1047 /* The 0.96 spec says a babbling control endpoint 1238 default:
1048 * is not halted. The 0.96 spec says it is. Some HW 1239 if (!xhci_requires_manual_halt_cleanup(xhci,
1049 * claims to be 0.95 compliant, but it halts the control 1240 ep_ctx, trb_comp_code))
1050 * endpoint anyway. Check if a babble halted the
1051 * endpoint.
1052 */
1053 if (ep_ctx->ep_info != EP_STATE_HALTED)
1054 break; 1241 break;
1242 xhci_dbg(xhci, "TRB error code %u, "
1243 "halted endpoint index = %u\n",
1244 trb_comp_code, ep_index);
1055 /* else fall through */ 1245 /* else fall through */
1056 case COMP_STALL: 1246 case COMP_STALL:
1057 /* Did we transfer part of the data (middle) phase? */ 1247 /* Did we transfer part of the data (middle) phase? */
@@ -1063,15 +1253,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1063 else 1253 else
1064 td->urb->actual_length = 0; 1254 td->urb->actual_length = 0;
1065 1255
1066 ep->stopped_td = td; 1256 xhci_cleanup_halted_endpoint(xhci,
1067 ep->stopped_trb = event_trb; 1257 slot_id, ep_index, td, event_trb);
1068 xhci_queue_reset_ep(xhci, slot_id, ep_index);
1069 xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
1070 xhci_ring_cmd_db(xhci);
1071 goto td_cleanup; 1258 goto td_cleanup;
1072 default:
1073 /* Others already handled above */
1074 break;
1075 } 1259 }
1076 /* 1260 /*
1077 * Did we transfer any data, despite the errors that might have 1261 * Did we transfer any data, despite the errors that might have
@@ -1209,16 +1393,25 @@ static int handle_tx_event(struct xhci_hcd *xhci,
1209 ep->stopped_td = td; 1393 ep->stopped_td = td;
1210 ep->stopped_trb = event_trb; 1394 ep->stopped_trb = event_trb;
1211 } else { 1395 } else {
1212 if (trb_comp_code == COMP_STALL || 1396 if (trb_comp_code == COMP_STALL) {
1213 trb_comp_code == COMP_BABBLE) {
1214 /* The transfer is completed from the driver's 1397 /* The transfer is completed from the driver's
1215 * perspective, but we need to issue a set dequeue 1398 * perspective, but we need to issue a set dequeue
1216 * command for this stalled endpoint to move the dequeue 1399 * command for this stalled endpoint to move the dequeue
1217 * pointer past the TD. We can't do that here because 1400 * pointer past the TD. We can't do that here because
1218 * the halt condition must be cleared first. 1401 * the halt condition must be cleared first. Let the
1402 * USB class driver clear the stall later.
1219 */ 1403 */
1220 ep->stopped_td = td; 1404 ep->stopped_td = td;
1221 ep->stopped_trb = event_trb; 1405 ep->stopped_trb = event_trb;
1406 } else if (xhci_requires_manual_halt_cleanup(xhci,
1407 ep_ctx, trb_comp_code)) {
1408 /* Other types of errors halt the endpoint, but the
1409 * class driver doesn't call usb_reset_endpoint() unless
1410 * the error is -EPIPE. Clear the halted status in the
1411 * xHCI hardware manually.
1412 */
1413 xhci_cleanup_halted_endpoint(xhci,
1414 slot_id, ep_index, td, event_trb);
1222 } else { 1415 } else {
1223 /* Update ring dequeue pointer */ 1416 /* Update ring dequeue pointer */
1224 while (ep_ring->dequeue != td->last_trb) 1417 while (ep_ring->dequeue != td->last_trb)
@@ -1249,10 +1442,9 @@ td_cleanup:
1249 } 1442 }
1250 list_del(&td->td_list); 1443 list_del(&td->td_list);
1251 /* Was this TD slated to be cancelled but completed anyway? */ 1444 /* Was this TD slated to be cancelled but completed anyway? */
1252 if (!list_empty(&td->cancelled_td_list)) { 1445 if (!list_empty(&td->cancelled_td_list))
1253 list_del(&td->cancelled_td_list); 1446 list_del(&td->cancelled_td_list);
1254 ep->cancels_pending--; 1447
1255 }
1256 /* Leave the TD around for the reset endpoint function to use 1448 /* Leave the TD around for the reset endpoint function to use
1257 * (but only if it's not a control endpoint, since we already 1449 * (but only if it's not a control endpoint, since we already
1258 * queued the Set TR dequeue pointer command for stalled 1450 * queued the Set TR dequeue pointer command for stalled
@@ -1331,6 +1523,14 @@ void xhci_handle_event(struct xhci_hcd *xhci)
1331 default: 1523 default:
1332 xhci->error_bitmask |= 1 << 3; 1524 xhci->error_bitmask |= 1 << 3;
1333 } 1525 }
1526 /* Any of the above functions may drop and re-acquire the lock, so check
1527 * to make sure a watchdog timer didn't mark the host as non-responsive.
1528 */
1529 if (xhci->xhc_state & XHCI_STATE_DYING) {
1530 xhci_dbg(xhci, "xHCI host dying, returning from "
1531 "event handler.\n");
1532 return;
1533 }
1334 1534
1335 if (update_ptrs) { 1535 if (update_ptrs) {
1336 /* Update SW and HC event ring dequeue pointer */ 1536 /* Update SW and HC event ring dequeue pointer */
@@ -1555,6 +1755,21 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1555 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index); 1755 return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
1556} 1756}
1557 1757
1758/*
1759 * The TD size is the number of bytes remaining in the TD (including this TRB),
1760 * right shifted by 10.
1761 * It must fit in bits 21:17, so it can't be bigger than 31.
1762 */
1763static u32 xhci_td_remainder(unsigned int remainder)
1764{
1765 u32 max = (1 << (21 - 17 + 1)) - 1;
1766
1767 if ((remainder >> 10) >= max)
1768 return max << 17;
1769 else
1770 return (remainder >> 10) << 17;
1771}
1772
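Only five bits (21:17) survive the shift, so any TD with 31 * 1024 bytes or more still outstanding saturates at 31. Worked values:

	/* xhci_td_remainder() examples (max == 31):
	 *   remainder      512:      512 >> 10 == 0    -> returns 0
	 *   remainder     4096:     4096 >> 10 == 4    -> returns 4 << 17
	 *   remainder 10000000: 10000000 >> 10 == 9765 -> clamped to 31 << 17
	 */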
1558static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, 1773static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1559 struct urb *urb, int slot_id, unsigned int ep_index) 1774 struct urb *urb, int slot_id, unsigned int ep_index)
1560{ 1775{
@@ -1612,6 +1827,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1612 do { 1827 do {
1613 u32 field = 0; 1828 u32 field = 0;
1614 u32 length_field = 0; 1829 u32 length_field = 0;
1830 u32 remainder = 0;
1615 1831
1616 /* Don't change the cycle bit of the first TRB until later */ 1832 /* Don't change the cycle bit of the first TRB until later */
1617 if (first_trb) 1833 if (first_trb)
@@ -1641,8 +1857,10 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1641 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 1857 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
1642 (unsigned int) addr + trb_buff_len); 1858 (unsigned int) addr + trb_buff_len);
1643 } 1859 }
1860 remainder = xhci_td_remainder(urb->transfer_buffer_length -
 1861 running_total);
1644 length_field = TRB_LEN(trb_buff_len) | 1862 length_field = TRB_LEN(trb_buff_len) |
1645 TD_REMAINDER(urb->transfer_buffer_length - running_total) | 1863 remainder |
1646 TRB_INTR_TARGET(0); 1864 TRB_INTR_TARGET(0);
1647 queue_trb(xhci, ep_ring, false, 1865 queue_trb(xhci, ep_ring, false,
1648 lower_32_bits(addr), 1866 lower_32_bits(addr),
@@ -1755,6 +1973,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1755 1973
1756 /* Queue the first TRB, even if it's zero-length */ 1974 /* Queue the first TRB, even if it's zero-length */
1757 do { 1975 do {
1976 u32 remainder = 0;
1758 field = 0; 1977 field = 0;
1759 1978
1760 /* Don't change the cycle bit of the first TRB until later */ 1979 /* Don't change the cycle bit of the first TRB until later */
@@ -1773,8 +1992,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1773 td->last_trb = ep_ring->enqueue; 1992 td->last_trb = ep_ring->enqueue;
1774 field |= TRB_IOC; 1993 field |= TRB_IOC;
1775 } 1994 }
1995 remainder = xhci_td_remainder(urb->transfer_buffer_length -
1996 running_total);
1776 length_field = TRB_LEN(trb_buff_len) | 1997 length_field = TRB_LEN(trb_buff_len) |
1777 TD_REMAINDER(urb->transfer_buffer_length - running_total) | 1998 remainder |
1778 TRB_INTR_TARGET(0); 1999 TRB_INTR_TARGET(0);
1779 queue_trb(xhci, ep_ring, false, 2000 queue_trb(xhci, ep_ring, false,
1780 lower_32_bits(addr), 2001 lower_32_bits(addr),
@@ -1862,7 +2083,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
1862 /* If there's data, queue data TRBs */ 2083 /* If there's data, queue data TRBs */
1863 field = 0; 2084 field = 0;
1864 length_field = TRB_LEN(urb->transfer_buffer_length) | 2085 length_field = TRB_LEN(urb->transfer_buffer_length) |
1865 TD_REMAINDER(urb->transfer_buffer_length) | 2086 xhci_td_remainder(urb->transfer_buffer_length) |
1866 TRB_INTR_TARGET(0); 2087 TRB_INTR_TARGET(0);
1867 if (urb->transfer_buffer_length > 0) { 2088 if (urb->transfer_buffer_length > 0) {
1868 if (setup->bRequestType & USB_DIR_IN) 2089 if (setup->bRequestType & USB_DIR_IN)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 4b254b6fa245..877813505ef2 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -652,13 +652,17 @@ struct xhci_virt_ep {
652 struct xhci_ring *new_ring; 652 struct xhci_ring *new_ring;
653 unsigned int ep_state; 653 unsigned int ep_state;
654#define SET_DEQ_PENDING (1 << 0) 654#define SET_DEQ_PENDING (1 << 0)
655#define EP_HALTED (1 << 1) 655#define EP_HALTED (1 << 1) /* For stall handling */
656#define EP_HALT_PENDING (1 << 2) /* For URB cancellation */
656 /* ---- Related to URB cancellation ---- */ 657 /* ---- Related to URB cancellation ---- */
657 struct list_head cancelled_td_list; 658 struct list_head cancelled_td_list;
658 unsigned int cancels_pending;
659 /* The TRB that was last reported in a stopped endpoint ring */ 659 /* The TRB that was last reported in a stopped endpoint ring */
660 union xhci_trb *stopped_trb; 660 union xhci_trb *stopped_trb;
661 struct xhci_td *stopped_td; 661 struct xhci_td *stopped_td;
662 /* Watchdog timer for stop endpoint command to cancel URBs */
663 struct timer_list stop_cmd_timer;
664 int stop_cmds_pending;
665 struct xhci_hcd *xhci;
662}; 666};
663 667
664struct xhci_virt_device { 668struct xhci_virt_device {
@@ -673,6 +677,10 @@ struct xhci_virt_device {
673 struct xhci_container_ctx *out_ctx; 677 struct xhci_container_ctx *out_ctx;
674 /* Used for addressing devices and configuration changes */ 678 /* Used for addressing devices and configuration changes */
675 struct xhci_container_ctx *in_ctx; 679 struct xhci_container_ctx *in_ctx;
680 /* Rings saved to ensure old alt settings can be re-instated */
681 struct xhci_ring **ring_cache;
682 int num_rings_cached;
683#define XHCI_MAX_RINGS_CACHED 31
676 struct xhci_virt_ep eps[31]; 684 struct xhci_virt_ep eps[31];
677 struct completion cmd_completion; 685 struct completion cmd_completion;
678 /* Status of the last command issued for this device */ 686 /* Status of the last command issued for this device */
@@ -824,9 +832,6 @@ struct xhci_event_cmd {
824/* Normal TRB fields */ 832/* Normal TRB fields */
825/* transfer_len bitmasks - bits 0:16 */ 833/* transfer_len bitmasks - bits 0:16 */
826#define TRB_LEN(p) ((p) & 0x1ffff) 834#define TRB_LEN(p) ((p) & 0x1ffff)
827/* TD size - number of bytes remaining in the TD (including this TRB):
828 * bits 17 - 21. Shift the number of bytes by 10. */
829#define TD_REMAINDER(p) ((((p) >> 10) & 0x1f) << 17)
830/* Interrupter Target - which MSI-X vector to target the completion event at */ 835/* Interrupter Target - which MSI-X vector to target the completion event at */
831#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22) 836#define TRB_INTR_TARGET(p) (((p) & 0x3ff) << 22)
832#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff) 837#define GET_INTR_TARGET(p) (((p) >> 22) & 0x3ff)
@@ -1022,6 +1027,8 @@ struct xhci_scratchpad {
1022#define ERST_ENTRIES 1 1027#define ERST_ENTRIES 1
1023/* Poll every 60 seconds */ 1028/* Poll every 60 seconds */
1024#define POLL_TIMEOUT 60 1029#define POLL_TIMEOUT 60
1030/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
1031#define XHCI_STOP_EP_CMD_TIMEOUT 5
1025/* XXX: Make these module parameters */ 1032/* XXX: Make these module parameters */
1026 1033
1027 1034
@@ -1083,6 +1090,21 @@ struct xhci_hcd {
1083 struct timer_list event_ring_timer; 1090 struct timer_list event_ring_timer;
1084 int zombie; 1091 int zombie;
1085#endif 1092#endif
1093 /* Host controller watchdog timer structures */
1094 unsigned int xhc_state;
1095/* Host controller is dying - not responding to commands. "I'm not dead yet!"
1096 *
1097 * xHC interrupts have been disabled and a watchdog timer will (or has already)
1098 * halt the xHCI host, and complete all URBs with an -ESHUTDOWN code. Any code
1099 * that sees this status (other than the timer that set it) should stop touching
1100 * hardware immediately. Interrupt handlers should return immediately when
1101 * they see this status (any time they drop and re-acquire xhci->lock).
1102 * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
1103 * putting the TD on the canceled list, etc.
1104 *
1105 * There are no reports of xHCI host controllers that display this issue.
1106 */
1107#define XHCI_STATE_DYING (1 << 0)
1086 /* Statistics */ 1108 /* Statistics */
1087 int noops_submitted; 1109 int noops_submitted;
1088 int noops_handled; 1110 int noops_handled;
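Per the comment, the dequeue path must honor the flag as well. A sketch of the intended xhci_urb_dequeue() behavior, reconstructed from the comment above (an assumption; that hunk is not shown here):

	spin_lock_irqsave(&xhci->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;
	if (xhci->xhc_state & XHCI_STATE_DYING)
		/* The watchdog owns the TD lists now and will give this
		 * URB back with -ESHUTDOWN; don't queue a Stop Endpoint
		 * command or touch the rings.
		 */
		goto done;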
@@ -1223,6 +1245,7 @@ void xhci_unregister_pci(void);
1223#endif 1245#endif
1224 1246
1225/* xHCI host controller glue */ 1247/* xHCI host controller glue */
1248void xhci_quiesce(struct xhci_hcd *xhci);
1226int xhci_halt(struct xhci_hcd *xhci); 1249int xhci_halt(struct xhci_hcd *xhci);
1227int xhci_reset(struct xhci_hcd *xhci); 1250int xhci_reset(struct xhci_hcd *xhci);
1228int xhci_init(struct usb_hcd *hcd); 1251int xhci_init(struct usb_hcd *hcd);
@@ -1246,6 +1269,9 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
1246 1269
1247/* xHCI ring, segment, TRB, and TD functions */ 1270/* xHCI ring, segment, TRB, and TD functions */
1248dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb); 1271dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
1272struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
1273 union xhci_trb *start_trb, union xhci_trb *end_trb,
1274 dma_addr_t suspect_dma);
1249void xhci_ring_cmd_db(struct xhci_hcd *xhci); 1275void xhci_ring_cmd_db(struct xhci_hcd *xhci);
1250void *xhci_setup_one_noop(struct xhci_hcd *xhci); 1276void *xhci_setup_one_noop(struct xhci_hcd *xhci);
1251void xhci_handle_event(struct xhci_hcd *xhci); 1277void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1278,6 +1304,7 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
1278void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, 1304void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
1279 unsigned int slot_id, unsigned int ep_index, 1305 unsigned int slot_id, unsigned int ep_index,
1280 struct xhci_dequeue_state *deq_state); 1306 struct xhci_dequeue_state *deq_state);
1307void xhci_stop_endpoint_command_watchdog(unsigned long arg);
1281 1308
1282/* xHCI roothub code */ 1309/* xHCI roothub code */
1283int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, 1310int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index a9f06d76960f..3dab0c0b196f 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -213,8 +213,9 @@ static struct urb *simple_alloc_urb (
213} 213}
214 214
215static unsigned pattern = 0; 215static unsigned pattern = 0;
216module_param (pattern, uint, S_IRUGO); 216static unsigned mod_pattern;
217MODULE_PARM_DESC(pattern, "i/o pattern (0 == zeroes)"); 217module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
218MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
218 219
219static inline void simple_fill_buf (struct urb *urb) 220static inline void simple_fill_buf (struct urb *urb)
220{ 221{
@@ -1567,6 +1568,8 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
1567 1568
1568 // FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. 1569 // FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is.
1569 1570
1571 pattern = mod_pattern;
1572
1570 if (code != USBTEST_REQUEST) 1573 if (code != USBTEST_REQUEST)
1571 return -EOPNOTSUPP; 1574 return -EOPNOTSUPP;
1572 1575
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 10f3205798e8..385ec0520167 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -16,6 +16,7 @@
16#include <linux/compat.h> 16#include <linux/compat.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
19#include <linux/scatterlist.h>
19 20
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
21 22
@@ -221,7 +222,7 @@ static void mon_free_buff(struct mon_pgmap *map, int npages);
221/* 222/*
222 * This is a "chunked memcpy". It does not manipulate any counters. 223 * This is a "chunked memcpy". It does not manipulate any counters.
223 */ 224 */
224static void mon_copy_to_buff(const struct mon_reader_bin *this, 225static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
225 unsigned int off, const unsigned char *from, unsigned int length) 226 unsigned int off, const unsigned char *from, unsigned int length)
226{ 227{
227 unsigned int step_len; 228 unsigned int step_len;
@@ -246,6 +247,7 @@ static void mon_copy_to_buff(const struct mon_reader_bin *this,
246 from += step_len; 247 from += step_len;
247 length -= step_len; 248 length -= step_len;
248 } 249 }
250 return off;
249} 251}
250 252
251/* 253/*
@@ -394,14 +396,44 @@ static inline char mon_bin_get_setup(unsigned char *setupb,
394 return 0; 396 return 0;
395} 397}
396 398
397static char mon_bin_get_data(const struct mon_reader_bin *rp, 399static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
398 unsigned int offset, struct urb *urb, unsigned int length) 400 unsigned int offset, struct urb *urb, unsigned int length,
401 char *flag)
399{ 402{
403 int i;
404 struct scatterlist *sg;
405 unsigned int this_len;
406
407 *flag = 0;
408 if (urb->num_sgs == 0) {
409 if (urb->transfer_buffer == NULL) {
410 *flag = 'Z';
411 return length;
412 }
413 mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
414 length = 0;
400 415
401 if (urb->transfer_buffer == NULL) 416 } else {
402 return 'Z'; 417 /* If IOMMU coalescing occurred, we cannot trust sg_page */
403 mon_copy_to_buff(rp, offset, urb->transfer_buffer, length); 418 if (urb->sg->nents != urb->num_sgs) {
404 return 0; 419 *flag = 'D';
420 return length;
421 }
422
423 /* Copy up to the first non-addressable segment */
424 for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
425 if (length == 0 || PageHighMem(sg_page(sg)))
426 break;
427 this_len = min_t(unsigned int, sg->length, length);
428 offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
429 this_len);
430 length -= this_len;
431 }
432 if (i == 0)
433 *flag = 'D';
434 }
435
436 return length;
405} 437}
406 438
407static void mon_bin_get_isodesc(const struct mon_reader_bin *rp, 439static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
@@ -536,8 +568,9 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
536 } 568 }
537 569
538 if (length != 0) { 570 if (length != 0) {
539 ep->flag_data = mon_bin_get_data(rp, offset, urb, length); 571 length = mon_bin_get_data(rp, offset, urb, length,
540 if (ep->flag_data != 0) { /* Yes, it's 0x00, not '0' */ 572 &ep->flag_data);
573 if (length > 0) {
541 delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); 574 delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
542 ep->len_cap -= length; 575 ep->len_cap -= length;
543 delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1); 576 delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 9f1a9227ebe6..047568ff223d 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -10,6 +10,7 @@
10#include <linux/time.h> 10#include <linux/time.h>
11#include <linux/mutex.h> 11#include <linux/mutex.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/scatterlist.h>
13#include <asm/uaccess.h> 14#include <asm/uaccess.h>
14 15
15#include "usb_mon.h" 16#include "usb_mon.h"
@@ -137,6 +138,8 @@ static inline char mon_text_get_setup(struct mon_event_text *ep,
137static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, 138static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
138 int len, char ev_type, struct mon_bus *mbus) 139 int len, char ev_type, struct mon_bus *mbus)
139{ 140{
141 void *src;
142
140 if (len <= 0) 143 if (len <= 0)
141 return 'L'; 144 return 'L';
142 if (len >= DATA_MAX) 145 if (len >= DATA_MAX)
@@ -150,10 +153,24 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
150 return '>'; 153 return '>';
151 } 154 }
152 155
153 if (urb->transfer_buffer == NULL) 156 if (urb->num_sgs == 0) {
154 return 'Z'; /* '0' would be not as pretty. */ 157 src = urb->transfer_buffer;
158 if (src == NULL)
159 return 'Z'; /* '0' would be not as pretty. */
160 } else {
161 struct scatterlist *sg = urb->sg->sg;
162
163 /* If IOMMU coalescing occurred, we cannot trust sg_page */
164 if (urb->sg->nents != urb->num_sgs ||
165 PageHighMem(sg_page(sg)))
166 return 'D';
167
168 /* For the text interface we copy only the first sg buffer */
169 len = min_t(int, sg->length, len);
170 src = sg_virt(sg);
171 }
155 172
156 memcpy(ep->data, urb->transfer_buffer, len); 173 memcpy(ep->data, src, len);
157 return 0; 174 return 0;
158} 175}
159 176
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index b84abd8ee8a5..d9db86498022 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -9,10 +9,9 @@ comment "Enable Host or Gadget support to see Inventra options"
9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller 9# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
10config USB_MUSB_HDRC 10config USB_MUSB_HDRC
11 depends on (USB || USB_GADGET) 11 depends on (USB || USB_GADGET)
12 depends on (ARM || BLACKFIN) 12 depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
13 select NOP_USB_XCEIV if ARCH_DAVINCI 13 select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
14 select TWL4030_USB if MACH_OMAP_3430SDP 14 select TWL4030_USB if MACH_OMAP_3430SDP
15 select NOP_USB_XCEIV if MACH_OMAP3EVM
16 select USB_OTG_UTILS 15 select USB_OTG_UTILS
17 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' 16 tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
18 help 17 help
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index fcec87ea709e..fe4934d9602c 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -53,13 +53,11 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
53void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) 53void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
54{ 54{
55 void __iomem *fifo = hw_ep->fifo; 55 void __iomem *fifo = hw_ep->fifo;
56
57#ifdef CONFIG_BF52x
56 u8 epnum = hw_ep->epnum; 58 u8 epnum = hw_ep->epnum;
57 u16 dma_reg = 0; 59 u16 dma_reg = 0;
58 60
59 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
60 'R', hw_ep->epnum, fifo, len, dst);
61
62#ifdef CONFIG_BF52x
63 invalidate_dcache_range((unsigned int)dst, 61 invalidate_dcache_range((unsigned int)dst,
64 (unsigned int)(dst + len)); 62 (unsigned int)(dst + len));
65 63
@@ -102,6 +100,9 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
102 len & 0x01 ? (len >> 1) + 1 : len >> 1); 100 len & 0x01 ? (len >> 1) + 1 : len >> 1);
103#endif 101#endif
104 102
103 DBG(4, "%cX ep%d fifo %p count %d buf %p\n",
104 'R', hw_ep->epnum, fifo, len, dst);
105
105 dump_fifo_data(dst, len); 106 dump_fifo_data(dst, len);
106} 107}
107 108
@@ -225,8 +226,9 @@ int musb_platform_get_vbus_status(struct musb *musb)
225 return 0; 226 return 0;
226} 227}
227 228
228void musb_platform_set_mode(struct musb *musb, u8 musb_mode) 229int musb_platform_set_mode(struct musb *musb, u8 musb_mode)
229{ 230{
231 return -EIO;
230} 232}
231 233
232int __init musb_platform_init(struct musb *musb) 234int __init musb_platform_init(struct musb *musb)
@@ -261,10 +263,6 @@ int __init musb_platform_init(struct musb *musb)
261 SSYNC(); 263 SSYNC();
262 } 264 }
263 265
264 /* TODO
265 * Set SIC-IVG register
266 */
267
268 /* Configure PLL oscillator register */ 266 /* Configure PLL oscillator register */
269 bfin_write_USB_PLLOSC_CTRL(0x30a8); 267 bfin_write_USB_PLLOSC_CTRL(0x30a8);
270 SSYNC(); 268 SSYNC();
diff --git a/drivers/usb/musb/blackfin.h b/drivers/usb/musb/blackfin.h
index a240c1e53d16..10b7d7584f4b 100644
--- a/drivers/usb/musb/blackfin.h
+++ b/drivers/usb/musb/blackfin.h
@@ -14,6 +14,43 @@
14 * Blackfin specific definitions 14 * Blackfin specific definitions
15 */ 15 */
16 16
 17/* Anomaly notes:
18 *
19 * 05000450 - USB DMA Mode 1 Short Packet Data Corruption:
20 * MUSB driver is designed to transfer buffer of N * maxpacket size
21 * in DMA mode 1 and leave the rest of the data to the next
22 * transfer in DMA mode 0, so we never transmit a short packet in
23 * DMA mode 1.
24 *
25 * 05000463 - This anomaly doesn't affect this driver since it
26 * never uses L1 or L2 memory as data destination.
27 *
28 * 05000464 - This anomaly doesn't affect this driver since it
29 * never uses L1 or L2 memory as data source.
30 *
31 * 05000465 - The anomaly can be seen when SCLK is over 100 MHz, and there is
 32 * no way to work around it for bulk endpoints. Since the wMaxPacketSize
33 * of bulk is less than or equal to 512, while the fifo size of
34 * endpoint 5, 6, 7 is 1024, the double buffer mode is enabled
35 * automatically when these endpoints are used for bulk OUT.
36 *
37 * 05000466 - This anomaly doesn't affect this driver since it never mixes
38 * concurrent DMA and core accesses to the TX endpoint FIFOs.
39 *
40 * 05000467 - The workaround for this anomaly will introduce another
41 * anomaly - 05000465.
42 */
43
44/* The Mentor USB DMA engine on BF52x (silicon v0.0 and v0.1) seems to be
45 * unstable in host mode. This may be caused by Anomaly 05000380. After
46 * digging out the root cause, we will change this number accordingly.
 47 * So we need to either use silicon v0.2+ or disable DMA mode in MUSB.
48 */
49#if ANOMALY_05000380 && defined(CONFIG_BF52x) && \
50 defined(CONFIG_USB_MUSB_HDRC) && !defined(CONFIG_MUSB_PIO_ONLY)
51# error "Please use PIO mode in MUSB driver on bf52x chip v0.0 and v0.1"
52#endif
53
17#undef DUMP_FIFO_DATA 54#undef DUMP_FIFO_DATA
18#ifdef DUMP_FIFO_DATA 55#ifdef DUMP_FIFO_DATA
19static void dump_fifo_data(u8 *buf, u16 len) 56static void dump_fifo_data(u8 *buf, u16 len)
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 547e0e390726..49f2346afad3 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1319,7 +1319,6 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1319#endif 1319#endif
1320 u8 reg; 1320 u8 reg;
1321 char *type; 1321 char *type;
1322 u16 hwvers, rev_major, rev_minor;
1323 char aInfo[78], aRevision[32], aDate[12]; 1322 char aInfo[78], aRevision[32], aDate[12];
1324 void __iomem *mbase = musb->mregs; 1323 void __iomem *mbase = musb->mregs;
1325 int status = 0; 1324 int status = 0;
@@ -1391,11 +1390,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
1391 } 1390 }
1392 1391
1393 /* log release info */ 1392 /* log release info */
1394 hwvers = musb_read_hwvers(mbase); 1393 musb->hwvers = musb_read_hwvers(mbase);
1395 rev_major = (hwvers >> 10) & 0x1f; 1394 snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
1396 rev_minor = hwvers & 0x3ff; 1395 MUSB_HWVERS_MINOR(musb->hwvers),
1397 snprintf(aRevision, 32, "%d.%d%s", rev_major, 1396 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
1398 rev_minor, (hwvers & 0x8000) ? "RC" : "");
1399 printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", 1397 printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
1400 musb_driver_name, type, aRevision, aDate); 1398 musb_driver_name, type, aRevision, aDate);
1401 1399
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 6aa5f22e5274..03d50909b078 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -322,6 +322,14 @@ struct musb {
322 struct clk *clock; 322 struct clk *clock;
323 irqreturn_t (*isr)(int, void *); 323 irqreturn_t (*isr)(int, void *);
324 struct work_struct irq_work; 324 struct work_struct irq_work;
325#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
326#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
327#define MUSB_HWVERS_RC 0x8000
328#define MUSB_HWVERS_1300 0x52C
329#define MUSB_HWVERS_1400 0x590
330#define MUSB_HWVERS_1800 0x720
331#define MUSB_HWVERS_2000 0x800
332 u16 hwvers;
325 333
326/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ 334/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
327#define MUSB_PORT_STAT_RESUME (1 << 31) 335#define MUSB_PORT_STAT_RESUME (1 << 31)
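With the macros in place, decoding a raw HWVERS value is mechanical; an illustrative snippet mirroring the snprintf() in musb_core_init():

	u16 hwvers = MUSB_HWVERS_1400;	/* raw 0x590 */

	/* major = (0x590 >> 10) & 0x1f = 1, minor = 0x590 & 0x3ff = 400 */
	printk(KERN_DEBUG "MUSB core v%d.%d%s\n",
			MUSB_HWVERS_MAJOR(hwvers), MUSB_HWVERS_MINOR(hwvers),
			(hwvers & MUSB_HWVERS_RC) ? "RC" : "");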
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 0a2c4e3602c1..916065ba9e70 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -80,6 +80,17 @@ struct musb_hw_ep;
80#define tusb_dma_omap() 0 80#define tusb_dma_omap() 0
81#endif 81#endif
82 82
83/* Anomaly 05000456 - USB Receive Interrupt Is Not Generated in DMA Mode 1
84 * Only allow DMA mode 1 to be used when the USB will actually generate the
85 * interrupts we expect.
86 */
87#ifdef CONFIG_BLACKFIN
88# undef USE_MODE1
89# if !ANOMALY_05000456
90# define USE_MODE1
91# endif
92#endif
93
83/* 94/*
84 * DMA channel status ... updated by the dma controller driver whenever that 95 * DMA channel status ... updated by the dma controller driver whenever that
85 * status changes, and protected by the overall controller spinlock. 96 * status changes, and protected by the overall controller spinlock.
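The net effect: on parts with anomaly 05000456, USE_MODE1 stays undefined and the driver falls back to per-packet DMA mode 0. Consumers would key off the macro roughly like this (a sketch assuming the musb channel_program() hook; not code from this patch):

	#ifdef USE_MODE1
		mode = 1;	/* whole request per IRQ, needs the RX interrupt */
	#else
		mode = 0;	/* one IRQ per packet; immune to 05000456 */
	#endif
		c->channel_program(channel, packet_sz, mode, buf_dma, length);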
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 74073f9a43f0..c49b9ba025ab 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -429,112 +429,102 @@ void musb_g_tx(struct musb *musb, u8 epnum)
429 DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); 429 DBG(4, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
430 430
431 dma = is_dma_capable() ? musb_ep->dma : NULL; 431 dma = is_dma_capable() ? musb_ep->dma : NULL;
432 do { 432
433 /* REVISIT for high bandwidth, MUSB_TXCSR_P_INCOMPTX 433 /*
434 * probably rates reporting as a host error 434 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
435 * probably rates reporting as a host error.
436 */
437 if (csr & MUSB_TXCSR_P_SENTSTALL) {
438 csr |= MUSB_TXCSR_P_WZC_BITS;
439 csr &= ~MUSB_TXCSR_P_SENTSTALL;
440 musb_writew(epio, MUSB_TXCSR, csr);
441 return;
442 }
443
444 if (csr & MUSB_TXCSR_P_UNDERRUN) {
445 /* We NAKed, no big deal... little reason to care. */
446 csr |= MUSB_TXCSR_P_WZC_BITS;
447 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
448 musb_writew(epio, MUSB_TXCSR, csr);
449 DBG(20, "underrun on ep%d, req %p\n", epnum, request);
450 }
451
452 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
453 /*
454 * SHOULD NOT HAPPEN... has with CPPI though, after
455 * changing SENDSTALL (and other cases); harmless?
435 */ 456 */
436 if (csr & MUSB_TXCSR_P_SENTSTALL) { 457 DBG(5, "%s dma still busy?\n", musb_ep->end_point.name);
437 csr |= MUSB_TXCSR_P_WZC_BITS; 458 return;
438 csr &= ~MUSB_TXCSR_P_SENTSTALL; 459 }
439 musb_writew(epio, MUSB_TXCSR, csr); 460
440 break; 461 if (request) {
441 } 462 u8 is_dma = 0;
442 463
443 if (csr & MUSB_TXCSR_P_UNDERRUN) { 464 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
444 /* we NAKed, no big deal ... little reason to care */ 465 is_dma = 1;
445 csr |= MUSB_TXCSR_P_WZC_BITS; 466 csr |= MUSB_TXCSR_P_WZC_BITS;
446 csr &= ~(MUSB_TXCSR_P_UNDERRUN 467 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
447 | MUSB_TXCSR_TXPKTRDY); 468 MUSB_TXCSR_TXPKTRDY);
448 musb_writew(epio, MUSB_TXCSR, csr); 469 musb_writew(epio, MUSB_TXCSR, csr);
449 DBG(20, "underrun on ep%d, req %p\n", epnum, request); 470 /* Ensure writebuffer is empty. */
471 csr = musb_readw(epio, MUSB_TXCSR);
472 request->actual += musb_ep->dma->actual_len;
473 DBG(4, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
474 epnum, csr, musb_ep->dma->actual_len, request);
450 } 475 }
451 476
452 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 477 if (is_dma || request->actual == request->length) {
453 /* SHOULD NOT HAPPEN ... has with cppi though, after 478 /*
454 * changing SENDSTALL (and other cases); harmless? 479 * First, maybe a terminating short packet. Some DMA
480 * engines might handle this by themselves.
455 */ 481 */
456 DBG(5, "%s dma still busy?\n", musb_ep->end_point.name); 482 if ((request->zero && request->length
457 break; 483 && request->length % musb_ep->packet_sz == 0)
458 }
459
460 if (request) {
461 u8 is_dma = 0;
462
463 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
464 is_dma = 1;
465 csr |= MUSB_TXCSR_P_WZC_BITS;
466 csr &= ~(MUSB_TXCSR_DMAENAB
467 | MUSB_TXCSR_P_UNDERRUN
468 | MUSB_TXCSR_TXPKTRDY);
469 musb_writew(epio, MUSB_TXCSR, csr);
470 /* ensure writebuffer is empty */
471 csr = musb_readw(epio, MUSB_TXCSR);
472 request->actual += musb_ep->dma->actual_len;
473 DBG(4, "TXCSR%d %04x, dma off, "
474 "len %zu, req %p\n",
475 epnum, csr,
476 musb_ep->dma->actual_len,
477 request);
478 }
479
480 if (is_dma || request->actual == request->length) {
481
482 /* First, maybe a terminating short packet.
483 * Some DMA engines might handle this by
484 * themselves.
485 */
486 if ((request->zero
487 && request->length
488 && (request->length
489 % musb_ep->packet_sz)
490 == 0)
491#ifdef CONFIG_USB_INVENTRA_DMA 484#ifdef CONFIG_USB_INVENTRA_DMA
492 || (is_dma && 485 || (is_dma && (!dma->desired_mode ||
493 ((!dma->desired_mode) || 486 (request->actual &
494 (request->actual & 487 (musb_ep->packet_sz - 1))))
495 (musb_ep->packet_sz - 1))))
496#endif 488#endif
497 ) { 489 ) {
498 /* on dma completion, fifo may not 490 /*
499 * be available yet ... 491 * On DMA completion, FIFO may not be
500 */ 492 * available yet...
501 if (csr & MUSB_TXCSR_TXPKTRDY)
502 break;
503
504 DBG(4, "sending zero pkt\n");
505 musb_writew(epio, MUSB_TXCSR,
506 MUSB_TXCSR_MODE
507 | MUSB_TXCSR_TXPKTRDY);
508 request->zero = 0;
509 }
510
511 /* ... or if not, then complete it */
512 musb_g_giveback(musb_ep, request, 0);
513
514 /* kickstart next transfer if appropriate;
515 * the packet that just completed might not
516 * be transmitted for hours or days.
517 * REVISIT for double buffering...
518 * FIXME revisit for stalls too...
519 */ 493 */
520 musb_ep_select(mbase, epnum); 494 if (csr & MUSB_TXCSR_TXPKTRDY)
521 csr = musb_readw(epio, MUSB_TXCSR); 495 return;
522 if (csr & MUSB_TXCSR_FIFONOTEMPTY) 496
523 break; 497 DBG(4, "sending zero pkt\n");
524 request = musb_ep->desc 498 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
525 ? next_request(musb_ep) 499 | MUSB_TXCSR_TXPKTRDY);
526 : NULL; 500 request->zero = 0;
527 if (!request) {
528 DBG(4, "%s idle now\n",
529 musb_ep->end_point.name);
530 break;
531 }
532 } 501 }
533 502
534 txstate(musb, to_musb_request(request)); 503 /* ... or if not, then complete it. */
504 musb_g_giveback(musb_ep, request, 0);
505
506 /*
507 * Kickstart next transfer if appropriate;
508 * the packet that just completed might not
509 * be transmitted for hours or days.
510 * REVISIT for double buffering...
511 * FIXME revisit for stalls too...
512 */
513 musb_ep_select(mbase, epnum);
514 csr = musb_readw(epio, MUSB_TXCSR);
515 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
516 return;
517
518 if (!musb_ep->desc) {
519 DBG(4, "%s idle now\n",
520 musb_ep->end_point.name);
521 return;
522 } else
523 request = next_request(musb_ep);
535 } 524 }
536 525
537 } while (0); 526 txstate(musb, to_musb_request(request));
527 }
538} 528}
539 529
540/* ------------------------------------------------------------ */ 530/* ------------------------------------------------------------ */
@@ -966,6 +956,7 @@ static int musb_gadget_enable(struct usb_ep *ep,
966 956
967 musb_ep->desc = desc; 957 musb_ep->desc = desc;
968 musb_ep->busy = 0; 958 musb_ep->busy = 0;
959 musb_ep->wedged = 0;
969 status = 0; 960 status = 0;
970 961
971 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", 962 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
@@ -1220,7 +1211,7 @@ done:
1220 * 1211 *
1221 * exported to ep0 code 1212 * exported to ep0 code
1222 */ 1213 */
1223int musb_gadget_set_halt(struct usb_ep *ep, int value) 1214static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1224{ 1215{
1225 struct musb_ep *musb_ep = to_musb_ep(ep); 1216 struct musb_ep *musb_ep = to_musb_ep(ep);
1226 u8 epnum = musb_ep->current_epnum; 1217 u8 epnum = musb_ep->current_epnum;
@@ -1262,7 +1253,8 @@ int musb_gadget_set_halt(struct usb_ep *ep, int value)
1262 goto done; 1253 goto done;
1263 } 1254 }
1264 } 1255 }
1265 } 1256 } else
1257 musb_ep->wedged = 0;
1266 1258
1267 /* set/clear the stall and toggle bits */ 1259 /* set/clear the stall and toggle bits */
1268 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear"); 1260 DBG(2, "%s: %s stall\n", ep->name, value ? "set" : "clear");
@@ -1301,6 +1293,21 @@ done:
1301 return status; 1293 return status;
1302} 1294}
1303 1295
1296/*
1297 * Sets the halt feature with the clear requests ignored
1298 */
1299static int musb_gadget_set_wedge(struct usb_ep *ep)
1300{
1301 struct musb_ep *musb_ep = to_musb_ep(ep);
1302
1303 if (!ep)
1304 return -EINVAL;
1305
1306 musb_ep->wedged = 1;
1307
1308 return usb_ep_set_halt(ep);
1309}
1310
1304static int musb_gadget_fifo_status(struct usb_ep *ep) 1311static int musb_gadget_fifo_status(struct usb_ep *ep)
1305{ 1312{
1306 struct musb_ep *musb_ep = to_musb_ep(ep); 1313 struct musb_ep *musb_ep = to_musb_ep(ep);
@@ -1371,6 +1378,7 @@ static const struct usb_ep_ops musb_ep_ops = {
1371 .queue = musb_gadget_queue, 1378 .queue = musb_gadget_queue,
1372 .dequeue = musb_gadget_dequeue, 1379 .dequeue = musb_gadget_dequeue,
1373 .set_halt = musb_gadget_set_halt, 1380 .set_halt = musb_gadget_set_halt,
1381 .set_wedge = musb_gadget_set_wedge,
1374 .fifo_status = musb_gadget_fifo_status, 1382 .fifo_status = musb_gadget_fifo_status,
1375 .fifo_flush = musb_gadget_fifo_flush 1383 .fifo_flush = musb_gadget_fifo_flush
1376}; 1384};
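Gadget drivers reach the new op through usb_ep_set_wedge(); for example, a mass-storage function that must keep a bulk endpoint stalled until the host resets the interface (illustrative call, not from this patch):

	/* Sets musb_ep->wedged and then stalls via usb_ep_set_halt();
	 * later ClearFeature(ENDPOINT_HALT) requests are ignored until
	 * the endpoint is re-enabled.
	 */
	usb_ep_set_wedge(bulk_in_ep);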
diff --git a/drivers/usb/musb/musb_gadget.h b/drivers/usb/musb/musb_gadget.h
index 59502da9f739..c8b140325d82 100644
--- a/drivers/usb/musb/musb_gadget.h
+++ b/drivers/usb/musb/musb_gadget.h
@@ -75,6 +75,8 @@ struct musb_ep {
75 /* later things are modified based on usage */ 75 /* later things are modified based on usage */
76 struct list_head req_list; 76 struct list_head req_list;
77 77
78 u8 wedged;
79
78 /* true if lock must be dropped but req_list may not be advanced */ 80 /* true if lock must be dropped but req_list may not be advanced */
79 u8 busy; 81 u8 busy;
80}; 82};
@@ -103,6 +105,4 @@ extern void musb_gadget_cleanup(struct musb *);
103 105
104extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int); 106extern void musb_g_giveback(struct musb_ep *, struct usb_request *, int);
105 107
106extern int musb_gadget_set_halt(struct usb_ep *ep, int value);
107
108#endif /* __MUSB_GADGET_H */ 108#endif /* __MUSB_GADGET_H */
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 522efb31b56b..8fba3f11e473 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -199,7 +199,6 @@ service_in_request(struct musb *musb, const struct usb_ctrlrequest *ctrlrequest)
199static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req) 199static void musb_g_ep0_giveback(struct musb *musb, struct usb_request *req)
200{ 200{
201 musb_g_giveback(&musb->endpoints[0].ep_in, req, 0); 201 musb_g_giveback(&musb->endpoints[0].ep_in, req, 0);
202 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
203} 202}
204 203
205/* 204/*
@@ -258,30 +257,53 @@ __acquires(musb->lock)
258 case USB_RECIP_INTERFACE: 257 case USB_RECIP_INTERFACE:
259 break; 258 break;
260 case USB_RECIP_ENDPOINT:{ 259 case USB_RECIP_ENDPOINT:{
261 const u8 num = ctrlrequest->wIndex & 0x0f; 260 const u8 epnum =
262 struct musb_ep *musb_ep; 261 ctrlrequest->wIndex & 0x0f;
262 struct musb_ep *musb_ep;
263 struct musb_hw_ep *ep;
264 void __iomem *regs;
265 int is_in;
266 u16 csr;
263 267
264 if (num == 0 268 if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
265 || num >= MUSB_C_NUM_EPS 269 ctrlrequest->wValue != USB_ENDPOINT_HALT)
266 || ctrlrequest->wValue
267 != USB_ENDPOINT_HALT)
268 break; 270 break;
269 271
270 if (ctrlrequest->wIndex & USB_DIR_IN) 272 ep = musb->endpoints + epnum;
271 musb_ep = &musb->endpoints[num].ep_in; 273 regs = ep->regs;
274 is_in = ctrlrequest->wIndex & USB_DIR_IN;
275 if (is_in)
276 musb_ep = &ep->ep_in;
272 else 277 else
273 musb_ep = &musb->endpoints[num].ep_out; 278 musb_ep = &ep->ep_out;
274 if (!musb_ep->desc) 279 if (!musb_ep->desc)
275 break; 280 break;
276 281
277 /* REVISIT do it directly, no locking games */ 282 handled = 1;
278 spin_unlock(&musb->lock); 283 /* Ignore request if endpoint is wedged */
279 musb_gadget_set_halt(&musb_ep->end_point, 0); 284 if (musb_ep->wedged)
280 spin_lock(&musb->lock); 285 break;
286
287 musb_ep_select(mbase, epnum);
288 if (is_in) {
289 csr = musb_readw(regs, MUSB_TXCSR);
290 csr |= MUSB_TXCSR_CLRDATATOG |
291 MUSB_TXCSR_P_WZC_BITS;
292 csr &= ~(MUSB_TXCSR_P_SENDSTALL |
293 MUSB_TXCSR_P_SENTSTALL |
294 MUSB_TXCSR_TXPKTRDY);
295 musb_writew(regs, MUSB_TXCSR, csr);
296 } else {
297 csr = musb_readw(regs, MUSB_RXCSR);
298 csr |= MUSB_RXCSR_CLRDATATOG |
299 MUSB_RXCSR_P_WZC_BITS;
300 csr &= ~(MUSB_RXCSR_P_SENDSTALL |
301 MUSB_RXCSR_P_SENTSTALL);
302 musb_writew(regs, MUSB_RXCSR, csr);
303 }
281 304
282 /* select ep0 again */ 305 /* select ep0 again */
283 musb_ep_select(mbase, 0); 306 musb_ep_select(mbase, 0);
284 handled = 1;
285 } break; 307 } break;
286 default: 308 default:
287 /* class, vendor, etc ... delegate */ 309 /* class, vendor, etc ... delegate */
@@ -374,10 +396,8 @@ stall:
374 int is_in; 396 int is_in;
375 u16 csr; 397 u16 csr;
376 398
377 if (epnum == 0 399 if (epnum == 0 || epnum >= MUSB_C_NUM_EPS ||
378 || epnum >= MUSB_C_NUM_EPS 400 ctrlrequest->wValue != USB_ENDPOINT_HALT)
379 || ctrlrequest->wValue
380 != USB_ENDPOINT_HALT)
381 break; 401 break;
382 402
383 ep = musb->endpoints + epnum; 403 ep = musb->endpoints + epnum;
@@ -392,24 +412,20 @@ stall:
392 412
393 musb_ep_select(mbase, epnum); 413 musb_ep_select(mbase, epnum);
394 if (is_in) { 414 if (is_in) {
395 csr = musb_readw(regs, 415 csr = musb_readw(regs, MUSB_TXCSR);
396 MUSB_TXCSR);
397 if (csr & MUSB_TXCSR_FIFONOTEMPTY) 416 if (csr & MUSB_TXCSR_FIFONOTEMPTY)
398 csr |= MUSB_TXCSR_FLUSHFIFO; 417 csr |= MUSB_TXCSR_FLUSHFIFO;
399 csr |= MUSB_TXCSR_P_SENDSTALL 418 csr |= MUSB_TXCSR_P_SENDSTALL
400 | MUSB_TXCSR_CLRDATATOG 419 | MUSB_TXCSR_CLRDATATOG
401 | MUSB_TXCSR_P_WZC_BITS; 420 | MUSB_TXCSR_P_WZC_BITS;
402 musb_writew(regs, MUSB_TXCSR, 421 musb_writew(regs, MUSB_TXCSR, csr);
403 csr);
404 } else { 422 } else {
405 csr = musb_readw(regs, 423 csr = musb_readw(regs, MUSB_RXCSR);
406 MUSB_RXCSR);
407 csr |= MUSB_RXCSR_P_SENDSTALL 424 csr |= MUSB_RXCSR_P_SENDSTALL
408 | MUSB_RXCSR_FLUSHFIFO 425 | MUSB_RXCSR_FLUSHFIFO
409 | MUSB_RXCSR_CLRDATATOG 426 | MUSB_RXCSR_CLRDATATOG
410 | MUSB_RXCSR_P_WZC_BITS; 427 | MUSB_RXCSR_P_WZC_BITS;
411 musb_writew(regs, MUSB_RXCSR, 428 musb_writew(regs, MUSB_RXCSR, csr);
412 csr);
413 } 429 }
414 430
415 /* select ep0 again */ 431 /* select ep0 again */
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index e3ab40a966eb..74c4c3698f1e 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1642,18 +1642,18 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1642 c = musb->dma_controller; 1642 c = musb->dma_controller;
1643 1643
1644 if (usb_pipeisoc(pipe)) { 1644 if (usb_pipeisoc(pipe)) {
1645 int status = 0; 1645 int d_status = 0;
1646 struct usb_iso_packet_descriptor *d; 1646 struct usb_iso_packet_descriptor *d;
1647 1647
1648 d = urb->iso_frame_desc + qh->iso_idx; 1648 d = urb->iso_frame_desc + qh->iso_idx;
1649 1649
1650 if (iso_err) { 1650 if (iso_err) {
1651 status = -EILSEQ; 1651 d_status = -EILSEQ;
1652 urb->error_count++; 1652 urb->error_count++;
1653 } 1653 }
1654 if (rx_count > d->length) { 1654 if (rx_count > d->length) {
1655 if (status == 0) { 1655 if (d_status == 0) {
1656 status = -EOVERFLOW; 1656 d_status = -EOVERFLOW;
1657 urb->error_count++; 1657 urb->error_count++;
1658 } 1658 }
1659 DBG(2, "** OVERFLOW %d into %d\n",\ 1659 DBG(2, "** OVERFLOW %d into %d\n",\
@@ -1662,7 +1662,7 @@ void musb_host_rx(struct musb *musb, u8 epnum)
1662 length = d->length; 1662 length = d->length;
1663 } else 1663 } else
1664 length = rx_count; 1664 length = rx_count;
1665 d->status = status; 1665 d->status = d_status;
1666 buf = urb->transfer_dma + d->offset; 1666 buf = urb->transfer_dma + d->offset;
1667 } else { 1667 } else {
1668 length = rx_count; 1668 length = rx_count;
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index cc1d71b57d3c..473a94ef905f 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -465,9 +465,9 @@ static inline u16 musb_read_hwvers(void __iomem *mbase)
465 return 0; 465 return 0;
466} 466}
467 467
468static inline u16 musb_read_target_reg_base(u8 i, void __iomem *mbase) 468static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
469{ 469{
470 return 0; 470 return NULL;
471} 471}
472 472
473static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs, 473static inline void musb_write_rxfunaddr(void __iomem *ep_target_regs,
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 5e83f96d6b77..a237550f91bf 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -259,6 +259,11 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
259 if (!int_hsdma) 259 if (!int_hsdma)
260 goto done; 260 goto done;
261 261
262#ifdef CONFIG_BLACKFIN
263 /* Clear DMA interrupt flags */
264 musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
265#endif
266
262 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) { 267 for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
263 if (int_hsdma & (1 << bchannel)) { 268 if (int_hsdma & (1 << bchannel)) {
264 musb_channel = (struct musb_dma_channel *) 269 musb_channel = (struct musb_dma_channel *)
@@ -280,7 +285,7 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
280 channel->actual_len = addr 285 channel->actual_len = addr
281 - musb_channel->start_addr; 286 - musb_channel->start_addr;
282 287
283 DBG(2, "ch %p, 0x%x -> 0x%x (%d / %d) %s\n", 288 DBG(2, "ch %p, 0x%x -> 0x%x (%zu / %d) %s\n",
284 channel, musb_channel->start_addr, 289 channel, musb_channel->start_addr,
285 addr, channel->actual_len, 290 addr, channel->actual_len,
286 musb_channel->len, 291 musb_channel->len,
@@ -324,11 +329,6 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
324 } 329 }
325 } 330 }
326 331
327#ifdef CONFIG_BLACKFIN
328 /* Clear DMA interrup flags */
329 musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
330#endif
331
332 retval = IRQ_HANDLED; 332 retval = IRQ_HANDLED;
333done: 333done:
334 spin_unlock_irqrestore(&musb->lock, flags); 334 spin_unlock_irqrestore(&musb->lock, flags);
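
The musbhsdma change above is purely about ordering: the Blackfin acknowledge write moves from the bottom of the handler to the top, so completions that arrive while channels are being serviced are not discarded by a stale ack. A minimal sketch of the ack-early pattern, with a hypothetical register layout (only the ordering is the point):

	#include <linux/io.h>

	#define NCHAN	8

	static void dma_irq(void __iomem *intr_reg, void (*service)(int ch))
	{
		u8 pending = readb(intr_reg);
		int ch;

		writeb(pending, intr_reg);	/* ack before servicing */
		for (ch = 0; ch < NCHAN; ch++)
			if (pending & (1 << ch))
				service(ch);
	}
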
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6761d2088db8..83beeac5e7bf 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -315,7 +315,7 @@ int musb_platform_exit(struct musb *musb)
315 musb_platform_suspend(musb); 315 musb_platform_suspend(musb);
316 316
317 clk_put(musb->clock); 317 clk_put(musb->clock);
318 musb->clock = 0; 318 musb->clock = NULL;
319 319
320 return 0; 320 return 0;
321} 321}
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index aa884d072f0b..de56b3d743d7 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -41,6 +41,15 @@ config ISP1301_OMAP
41 This driver can also be built as a module. If so, the module 41 This driver can also be built as a module. If so, the module
42 will be called isp1301_omap. 42 will be called isp1301_omap.
43 43
44config USB_ULPI
45 bool "Generic ULPI Transceiver Driver"
46 depends on ARM
47 help
48 Enable this to support ULPI connected USB OTG transceivers which
49 are likely found on embedded boards.
50
 51 The only chip currently supported is NXP's ISP1504.
52
44config TWL4030_USB 53config TWL4030_USB
45 tristate "TWL4030 USB Transceiver Driver" 54 tristate "TWL4030 USB Transceiver Driver"
46 depends on TWL4030_CORE && REGULATOR_TWL4030 55 depends on TWL4030_CORE && REGULATOR_TWL4030
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 208167856529..aeb49a8ec412 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o 10obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o 11obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
12obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o 12obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
13obj-$(CONFIG_USB_ULPI) += ulpi.o
13 14
14ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG 15ccflags-$(CONFIG_USB_DEBUG) += -DDEBUG
15ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG 16ccflags-$(CONFIG_USB_GADGET_DEBUG) += -DDEBUG
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 9e3e7a5c258b..bd9883f41e63 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -598,12 +598,12 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
598 * USB_LINK_VBUS state. musb_hdrc won't care until it 598 * USB_LINK_VBUS state. musb_hdrc won't care until it
599 * starts to handle softconnect right. 599 * starts to handle softconnect right.
600 */ 600 */
601 twl4030charger_usb_en(status == USB_LINK_VBUS);
602
603 if (status == USB_LINK_NONE) 601 if (status == USB_LINK_NONE)
604 twl4030_phy_suspend(twl, 0); 602 twl4030_phy_suspend(twl, 0);
605 else 603 else
606 twl4030_phy_resume(twl); 604 twl4030_phy_resume(twl);
605
606 twl4030charger_usb_en(status == USB_LINK_VBUS);
607 } 607 }
608 sysfs_notify(&twl->dev->kobj, NULL, "vbus"); 608 sysfs_notify(&twl->dev->kobj, NULL, "vbus");
609 609
diff --git a/drivers/usb/otg/ulpi.c b/drivers/usb/otg/ulpi.c
new file mode 100644
index 000000000000..896527456b7e
--- /dev/null
+++ b/drivers/usb/otg/ulpi.c
@@ -0,0 +1,136 @@
1/*
2 * Generic ULPI USB transceiver support
3 *
4 * Copyright (C) 2009 Daniel Mack <daniel@caiaq.de>
5 *
6 * Based on sources from
7 *
8 * Sascha Hauer <s.hauer@pengutronix.de>
9 * Freescale Semiconductors
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */
25
26#include <linux/kernel.h>
27#include <linux/usb.h>
28#include <linux/usb/otg.h>
29#include <linux/usb/ulpi.h>
30
31/* ULPI register addresses */
32#define ULPI_VID_LOW 0x00 /* Vendor ID low */
33#define ULPI_VID_HIGH 0x01 /* Vendor ID high */
34#define ULPI_PID_LOW 0x02 /* Product ID low */
35#define ULPI_PID_HIGH 0x03 /* Product ID high */
36#define ULPI_ITFCTL 0x07 /* Interface Control */
37#define ULPI_OTGCTL 0x0A /* OTG Control */
38
39/* add to above register address to access Set/Clear functions */
40#define ULPI_REG_SET 0x01
41#define ULPI_REG_CLEAR 0x02
42
43/* ULPI OTG Control Register bits */
44#define ID_PULL_UP (1 << 0) /* enable ID Pull Up */
45#define DP_PULL_DOWN (1 << 1) /* enable DP Pull Down */
46#define DM_PULL_DOWN (1 << 2) /* enable DM Pull Down */
47#define DISCHRG_VBUS (1 << 3) /* Discharge Vbus */
48#define CHRG_VBUS (1 << 4) /* Charge Vbus */
49#define DRV_VBUS (1 << 5) /* Drive Vbus */
50#define DRV_VBUS_EXT (1 << 6) /* Drive Vbus external */
51#define USE_EXT_VBUS_IND (1 << 7) /* Use ext. Vbus indicator */
52
53#define ULPI_ID(vendor, product) (((vendor) << 16) | (product))
54
55#define TR_FLAG(flags, a, b) (((flags) & a) ? b : 0)
56
57/* ULPI hardcoded IDs, used for probing */
58static unsigned int ulpi_ids[] = {
59 ULPI_ID(0x04cc, 0x1504), /* NXP ISP1504 */
60};
61
62static int ulpi_set_flags(struct otg_transceiver *otg)
63{
64 unsigned int flags = 0;
65
66 if (otg->flags & USB_OTG_PULLUP_ID)
67 flags |= ID_PULL_UP;
68
69 if (otg->flags & USB_OTG_PULLDOWN_DM)
70 flags |= DM_PULL_DOWN;
71
72 if (otg->flags & USB_OTG_PULLDOWN_DP)
73 flags |= DP_PULL_DOWN;
74
75 if (otg->flags & USB_OTG_EXT_VBUS_INDICATOR)
76 flags |= USE_EXT_VBUS_IND;
77
78 return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
79}
80
81static int ulpi_init(struct otg_transceiver *otg)
82{
83 int i, vid, pid;
84
85 vid = (otg_io_read(otg, ULPI_VID_HIGH) << 8) |
86 otg_io_read(otg, ULPI_VID_LOW);
87 pid = (otg_io_read(otg, ULPI_PID_HIGH) << 8) |
88 otg_io_read(otg, ULPI_PID_LOW);
89
90 pr_info("ULPI transceiver vendor/product ID 0x%04x/0x%04x\n", vid, pid);
91
92 for (i = 0; i < ARRAY_SIZE(ulpi_ids); i++)
93 if (ulpi_ids[i] == ULPI_ID(vid, pid))
94 return ulpi_set_flags(otg);
95
96 pr_err("ULPI ID does not match any known transceiver.\n");
97 return -ENODEV;
98}
99
100static int ulpi_set_vbus(struct otg_transceiver *otg, bool on)
101{
102 unsigned int flags = otg_io_read(otg, ULPI_OTGCTL);
103
104 flags &= ~(DRV_VBUS | DRV_VBUS_EXT);
105
106 if (on) {
107 if (otg->flags & USB_OTG_DRV_VBUS)
108 flags |= DRV_VBUS;
109
110 if (otg->flags & USB_OTG_DRV_VBUS_EXT)
111 flags |= DRV_VBUS_EXT;
112 }
113
114 return otg_io_write(otg, flags, ULPI_OTGCTL + ULPI_REG_SET);
115}
116
117struct otg_transceiver *
118otg_ulpi_create(struct otg_io_access_ops *ops,
119 unsigned int flags)
120{
121 struct otg_transceiver *otg;
122
123 otg = kzalloc(sizeof(*otg), GFP_KERNEL);
124 if (!otg)
125 return NULL;
126
127 otg->label = "ULPI";
128 otg->flags = flags;
129 otg->io_ops = ops;
130 otg->init = ulpi_init;
131 otg->set_vbus = ulpi_set_vbus;
132
133 return otg;
134}
135EXPORT_SYMBOL_GPL(otg_ulpi_create);
136
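
For context, otg_ulpi_create() above deliberately leaves register access to the caller via struct otg_io_access_ops, since ULPI register access is controller specific; note also that ulpi_set_flags() writes to ULPI_OTGCTL + ULPI_REG_SET (0x0A + 0x01 = 0x0B), the register's set-address. A board-glue sketch, assuming the .read/.write callback layout from <linux/usb/otg.h> of this era and the USB_OTG_* flag macros from the <linux/usb/ulpi.h> header added alongside this file; the viewport accessors are hypothetical placeholders:

	#include <linux/usb/otg.h>
	#include <linux/usb/ulpi.h>

	/* hypothetical stand-ins for the controller's ULPI viewport */
	static int my_viewport_read(u32 reg) { return 0; }
	static int my_viewport_write(u32 val, u32 reg) { return 0; }

	static int board_ulpi_read(struct otg_transceiver *otg, u32 reg)
	{
		return my_viewport_read(reg);		/* platform specific */
	}

	static int board_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
	{
		return my_viewport_write(val, reg);	/* platform specific */
	}

	static struct otg_io_access_ops board_ulpi_ops = {
		.read	= board_ulpi_read,
		.write	= board_ulpi_write,
	};

	/* in board init code: */
	struct otg_transceiver *x = otg_ulpi_create(&board_ulpi_ops,
			USB_OTG_PULLDOWN_DP | USB_OTG_PULLDOWN_DM);
	if (x)
		otg_set_transceiver(x);
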
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 131e61adaaf7..a9c2dec8e3fb 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -1,4 +1,6 @@
1/* 1/*
2 * Copyright (C) 2009 by Bart Hartgers (bart.hartgers+ark3116@gmail.com)
3 * Original version:
2 * Copyright (C) 2006 4 * Copyright (C) 2006
3 * Simon Schulz (ark3116_driver <at> auctionant.de) 5 * Simon Schulz (ark3116_driver <at> auctionant.de)
4 * 6 *
@@ -6,10 +8,13 @@
6 * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547, 8 * - implements a driver for the arkmicro ark3116 chipset (vendor=0x6547,
7 * productid=0x0232) (used in a datacable called KQ-U8A) 9 * productid=0x0232) (used in a datacable called KQ-U8A)
8 * 10 *
9 * - based on code by krisfx -> thanks !! 11 * Supports full modem status lines, break, hardware flow control. Does not
10 * (see http://www.linuxquestions.org/questions/showthread.php?p=2184457#post2184457) 12 * support software flow control, since I do not know how to enable it in hw.
11 * 13 *
12 * - based on logs created by usbsnoopy 14 * This driver is an essentially new implementation. I initially dug
15 * into the old ark3116.c driver and suddenly realized the ark3116 is
16 * a 16450 with a USB interface glued to it. See comments at the
17 * bottom of this file.
13 * 18 *
14 * This program is free software; you can redistribute it and/or modify it 19 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 20 * under the terms of the GNU General Public License as published by the
@@ -19,15 +24,31 @@
19 24
20#include <linux/kernel.h> 25#include <linux/kernel.h>
21#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/ioctl.h>
22#include <linux/tty.h> 28#include <linux/tty.h>
29#include <linux/tty_flip.h>
23#include <linux/module.h> 30#include <linux/module.h>
24#include <linux/usb.h> 31#include <linux/usb.h>
25#include <linux/usb/serial.h> 32#include <linux/usb/serial.h>
26#include <linux/serial.h> 33#include <linux/serial.h>
34#include <linux/serial_reg.h>
27#include <linux/uaccess.h> 35#include <linux/uaccess.h>
28 36#include <linux/mutex.h>
37#include <linux/spinlock.h>
29 38
30static int debug; 39static int debug;
40/*
41 * Version information
42 */
43
44#define DRIVER_VERSION "v0.5"
45#define DRIVER_AUTHOR "Bart Hartgers <bart.hartgers+ark3116@gmail.com>"
46#define DRIVER_DESC "USB ARK3116 serial/IrDA driver"
47#define DRIVER_DEV_DESC "ARK3116 RS232/IrDA"
48#define DRIVER_NAME "ark3116"
49
50/* usb timeout of 1 second */
51#define ARK_TIMEOUT (1*HZ)
31 52
32static struct usb_device_id id_table [] = { 53static struct usb_device_id id_table [] = {
33 { USB_DEVICE(0x6547, 0x0232) }, 54 { USB_DEVICE(0x6547, 0x0232) },
@@ -45,118 +66,152 @@ static int is_irda(struct usb_serial *serial)
45 return 0; 66 return 0;
46} 67}
47 68
48static inline void ARK3116_SND(struct usb_serial *serial, int seq, 69struct ark3116_private {
49 __u8 request, __u8 requesttype, 70 wait_queue_head_t delta_msr_wait;
50 __u16 value, __u16 index) 71 struct async_icount icount;
72 int irda; /* 1 for irda device */
73
74 /* protects hw register updates */
75 struct mutex hw_lock;
76
77 int quot; /* baudrate divisor */
78 __u32 lcr; /* line control register value */
79 __u32 hcr; /* handshake control register (0x8)
80 * value */
 81 __u32 mcr; /* modem control register value */
82
83 /* protects the status values below */
84 spinlock_t status_lock;
85 __u32 msr; /* modem status register value */
86 __u32 lsr; /* line status register value */
87};
88
89static int ark3116_write_reg(struct usb_serial *serial,
90 unsigned reg, __u8 val)
51{ 91{
52 int result; 92 int result;
93 /* 0xfe 0x40 are magic values taken from original driver */
53 result = usb_control_msg(serial->dev, 94 result = usb_control_msg(serial->dev,
54 usb_sndctrlpipe(serial->dev, 0), 95 usb_sndctrlpipe(serial->dev, 0),
55 request, requesttype, value, index, 96 0xfe, 0x40, val, reg,
56 NULL, 0x00, 1000); 97 NULL, 0, ARK_TIMEOUT);
57 dbg("%03d > ok", seq); 98 return result;
58} 99}
59 100
60static inline void ARK3116_RCV(struct usb_serial *serial, int seq, 101static int ark3116_read_reg(struct usb_serial *serial,
61 __u8 request, __u8 requesttype, 102 unsigned reg, unsigned char *buf)
62 __u16 value, __u16 index, __u8 expected,
63 char *buf)
64{ 103{
65 int result; 104 int result;
105 /* 0xfe 0xc0 are magic values taken from original driver */
66 result = usb_control_msg(serial->dev, 106 result = usb_control_msg(serial->dev,
67 usb_rcvctrlpipe(serial->dev, 0), 107 usb_rcvctrlpipe(serial->dev, 0),
68 request, requesttype, value, index, 108 0xfe, 0xc0, 0, reg,
69 buf, 0x0000001, 1000); 109 buf, 1, ARK_TIMEOUT);
70 if (result) 110 if (result < 0)
71 dbg("%03d < %d bytes [0x%02X]", seq, result, 111 return result;
72 ((unsigned char *)buf)[0]);
73 else 112 else
74 dbg("%03d < 0 bytes", seq); 113 return buf[0];
75} 114}
76 115
77static inline void ARK3116_RCV_QUIET(struct usb_serial *serial, 116static inline int calc_divisor(int bps)
78 __u8 request, __u8 requesttype,
79 __u16 value, __u16 index, char *buf)
80{ 117{
81 usb_control_msg(serial->dev, 118 /* Original ark3116 made some exceptions in rounding here
82 usb_rcvctrlpipe(serial->dev, 0), 119 * because windows did the same. Assume that is not really
83 request, requesttype, value, index, 120 * necessary.
84 buf, 0x0000001, 1000); 121 * Crystal is 12MHz, probably because of USB, but we divide by 4?
122 */
123 return (12000000 + 2*bps) / (4*bps);
85} 124}
86 125
87static int ark3116_attach(struct usb_serial *serial) 126static int ark3116_attach(struct usb_serial *serial)
88{ 127{
89 char *buf; 128 struct usb_serial_port *port = serial->port[0];
129 struct ark3116_private *priv;
130
131 /* make sure we have our end-points */
132 if ((serial->num_bulk_in == 0) ||
133 (serial->num_bulk_out == 0) ||
134 (serial->num_interrupt_in == 0)) {
135 dev_err(&serial->dev->dev,
136 "%s - missing endpoint - "
137 "bulk in: %d, bulk out: %d, int in %d\n",
138 KBUILD_MODNAME,
139 serial->num_bulk_in,
140 serial->num_bulk_out,
141 serial->num_interrupt_in);
142 return -EINVAL;
143 }
90 144
91 buf = kmalloc(1, GFP_KERNEL); 145 priv = kzalloc(sizeof(struct ark3116_private),
92 if (!buf) { 146 GFP_KERNEL);
93 dbg("error kmalloc -> out of mem?"); 147 if (!priv)
94 return -ENOMEM; 148 return -ENOMEM;
95 }
96 149
97 if (is_irda(serial)) 150 init_waitqueue_head(&priv->delta_msr_wait);
98 dbg("IrDA mode"); 151 mutex_init(&priv->hw_lock);
152 spin_lock_init(&priv->status_lock);
153
154 priv->irda = is_irda(serial);
99 155
100 /* 3 */ 156 usb_set_serial_port_data(port, priv);
101 ARK3116_SND(serial, 3, 0xFE, 0x40, 0x0008, 0x0002);
102 ARK3116_SND(serial, 4, 0xFE, 0x40, 0x0008, 0x0001);
103 ARK3116_SND(serial, 5, 0xFE, 0x40, 0x0000, 0x0008);
104 ARK3116_SND(serial, 6, 0xFE, 0x40, is_irda(serial) ? 0x0001 : 0x0000,
105 0x000B);
106 157
107 if (is_irda(serial)) { 158 /* setup the hardware */
108 ARK3116_SND(serial, 1001, 0xFE, 0x40, 0x0000, 0x000C); 159 ark3116_write_reg(serial, UART_IER, 0);
109 ARK3116_SND(serial, 1002, 0xFE, 0x40, 0x0041, 0x000D); 160 /* disable DMA */
110 ARK3116_SND(serial, 1003, 0xFE, 0x40, 0x0001, 0x000A); 161 ark3116_write_reg(serial, UART_FCR, 0);
162 /* handshake control */
163 priv->hcr = 0;
164 ark3116_write_reg(serial, 0x8 , 0);
165 /* modem control */
166 priv->mcr = 0;
167 ark3116_write_reg(serial, UART_MCR, 0);
168
169 if (!(priv->irda)) {
170 ark3116_write_reg(serial, 0xb , 0);
171 } else {
172 ark3116_write_reg(serial, 0xb , 1);
173 ark3116_write_reg(serial, 0xc , 0);
174 ark3116_write_reg(serial, 0xd , 0x41);
175 ark3116_write_reg(serial, 0xa , 1);
111 } 176 }
112 177
113 /* <-- seq7 */ 178 /* setup baudrate */
114 ARK3116_RCV(serial, 7, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf); 179 ark3116_write_reg(serial, UART_LCR, UART_LCR_DLAB);
115 ARK3116_SND(serial, 8, 0xFE, 0x40, 0x0080, 0x0003);
116 ARK3116_SND(serial, 9, 0xFE, 0x40, 0x001A, 0x0000);
117 ARK3116_SND(serial, 10, 0xFE, 0x40, 0x0000, 0x0001);
118 ARK3116_SND(serial, 11, 0xFE, 0x40, 0x0000, 0x0003);
119
120 /* <-- seq12 */
121 ARK3116_RCV(serial, 12, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
122 ARK3116_SND(serial, 13, 0xFE, 0x40, 0x0000, 0x0004);
123
124 /* 14 */
125 ARK3116_RCV(serial, 14, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
126 ARK3116_SND(serial, 15, 0xFE, 0x40, 0x0000, 0x0004);
127
128 /* 16 */
129 ARK3116_RCV(serial, 16, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
130 /* --> seq17 */
131 ARK3116_SND(serial, 17, 0xFE, 0x40, 0x0001, 0x0004);
132
133 /* <-- seq18 */
134 ARK3116_RCV(serial, 18, 0xFE, 0xC0, 0x0000, 0x0004, 0x01, buf);
135
136 /* --> seq19 */
137 ARK3116_SND(serial, 19, 0xFE, 0x40, 0x0003, 0x0004);
138
139 /* <-- seq20 */
140 /* seems like serial port status info (RTS, CTS, ...) */
141 /* returns modem control line status?! */
142 ARK3116_RCV(serial, 20, 0xFE, 0xC0, 0x0000, 0x0006, 0xFF, buf);
143
144 /* set 9600 baud & do some init?! */
145 ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
146 ARK3116_SND(serial, 148, 0xFE, 0x40, 0x0038, 0x0000);
147 ARK3116_SND(serial, 149, 0xFE, 0x40, 0x0001, 0x0001);
148 if (is_irda(serial))
149 ARK3116_SND(serial, 1004, 0xFE, 0x40, 0x0000, 0x0009);
150 ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
151 ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
152 ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
153 ARK3116_RCV(serial, 153, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
154 ARK3116_SND(serial, 154, 0xFE, 0x40, 0x0003, 0x0003);
155 180
156 kfree(buf); 181 /* setup for 9600 8N1 */
182 priv->quot = calc_divisor(9600);
183 ark3116_write_reg(serial, UART_DLL, priv->quot & 0xff);
184 ark3116_write_reg(serial, UART_DLM, (priv->quot>>8) & 0xff);
185
186 priv->lcr = UART_LCR_WLEN8;
187 ark3116_write_reg(serial, UART_LCR, UART_LCR_WLEN8);
188
189 ark3116_write_reg(serial, 0xe, 0);
190
191 if (priv->irda)
192 ark3116_write_reg(serial, 0x9, 0);
193
194 dev_info(&serial->dev->dev,
195 "%s using %s mode\n",
196 KBUILD_MODNAME,
197 priv->irda ? "IrDA" : "RS232");
157 return 0; 198 return 0;
158} 199}
159 200
201static void ark3116_release(struct usb_serial *serial)
202{
203 struct usb_serial_port *port = serial->port[0];
204 struct ark3116_private *priv = usb_get_serial_port_data(port);
205
206 /* device is closed, so URBs and DMA should be down */
207
208 usb_set_serial_port_data(port, NULL);
209
210 mutex_destroy(&priv->hw_lock);
211
212 kfree(priv);
213}
214
160static void ark3116_init_termios(struct tty_struct *tty) 215static void ark3116_init_termios(struct tty_struct *tty)
161{ 216{
162 struct ktermios *termios = tty->termios; 217 struct ktermios *termios = tty->termios;
@@ -172,200 +227,189 @@ static void ark3116_set_termios(struct tty_struct *tty,
172 struct ktermios *old_termios) 227 struct ktermios *old_termios)
173{ 228{
174 struct usb_serial *serial = port->serial; 229 struct usb_serial *serial = port->serial;
230 struct ark3116_private *priv = usb_get_serial_port_data(port);
175 struct ktermios *termios = tty->termios; 231 struct ktermios *termios = tty->termios;
176 unsigned int cflag = termios->c_cflag; 232 unsigned int cflag = termios->c_cflag;
177 int baud; 233 int bps = tty_get_baud_rate(tty);
178 int ark3116_baud; 234 int quot;
179 char *buf; 235 __u8 lcr, hcr, eval;
180 char config; 236
181 237 /* set data bit count */
182 config = 0; 238 switch (cflag & CSIZE) {
183 239 case CS5:
184 dbg("%s - port %d", __func__, port->number); 240 lcr = UART_LCR_WLEN5;
241 break;
242 case CS6:
243 lcr = UART_LCR_WLEN6;
244 break;
245 case CS7:
246 lcr = UART_LCR_WLEN7;
247 break;
248 default:
249 case CS8:
250 lcr = UART_LCR_WLEN8;
251 break;
252 }
253 if (cflag & CSTOPB)
254 lcr |= UART_LCR_STOP;
255 if (cflag & PARENB)
256 lcr |= UART_LCR_PARITY;
257 if (!(cflag & PARODD))
258 lcr |= UART_LCR_EPAR;
259#ifdef CMSPAR
260 if (cflag & CMSPAR)
261 lcr |= UART_LCR_SPAR;
262#endif
263 /* handshake control */
264 hcr = (cflag & CRTSCTS) ? 0x03 : 0x00;
265
266 /* calc baudrate */
267 dbg("%s - setting bps to %d", __func__, bps);
268 eval = 0;
269 switch (bps) {
270 case 0:
271 quot = calc_divisor(9600);
272 break;
273 default:
274 if ((bps < 75) || (bps > 3000000))
275 bps = 9600;
276 quot = calc_divisor(bps);
277 break;
278 case 460800:
279 eval = 1;
280 quot = calc_divisor(bps);
281 break;
282 case 921600:
283 eval = 2;
284 quot = calc_divisor(bps);
285 break;
286 }
185 287
288 /* Update state: synchronize */
289 mutex_lock(&priv->hw_lock);
186 290
187 cflag = termios->c_cflag; 291 /* keep old LCR_SBC bit */
188 termios->c_cflag &= ~(CMSPAR|CRTSCTS); 292 lcr |= (priv->lcr & UART_LCR_SBC);
189 293
190 buf = kmalloc(1, GFP_KERNEL); 294 dbg("%s - setting hcr:0x%02x,lcr:0x%02x,quot:%d",
191 if (!buf) { 295 __func__, hcr, lcr, quot);
192 dbg("error kmalloc");
193 *termios = *old_termios;
194 return;
195 }
196 296
197 /* set data bit count (8/7/6/5) */ 297 /* handshake control */
198 if (cflag & CSIZE) { 298 if (priv->hcr != hcr) {
199 switch (cflag & CSIZE) { 299 priv->hcr = hcr;
200 case CS5: 300 ark3116_write_reg(serial, 0x8, hcr);
201 config |= 0x00;
202 dbg("setting CS5");
203 break;
204 case CS6:
205 config |= 0x01;
206 dbg("setting CS6");
207 break;
208 case CS7:
209 config |= 0x02;
210 dbg("setting CS7");
211 break;
212 default:
213 dbg("CSIZE was set but not CS5-CS8, using CS8!");
214 /* fall through */
215 case CS8:
216 config |= 0x03;
217 dbg("setting CS8");
218 break;
219 }
220 } 301 }
221 302
222 /* set parity (NONE/EVEN/ODD) */ 303 /* baudrate */
223 if (cflag & PARENB) { 304 if (priv->quot != quot) {
224 if (cflag & PARODD) { 305 priv->quot = quot;
225 config |= 0x08; 306 priv->lcr = lcr; /* need to write lcr anyway */
226 dbg("setting parity to ODD"); 307
227 } else { 308 /* disable DMA since transmit/receive is
228 config |= 0x18; 309 * shadowed by UART_DLL
229 dbg("setting parity to EVEN"); 310 */
230 } 311 ark3116_write_reg(serial, UART_FCR, 0);
231 } else { 312
232 dbg("setting parity to NONE"); 313 ark3116_write_reg(serial, UART_LCR,
314 lcr|UART_LCR_DLAB);
315 ark3116_write_reg(serial, UART_DLL, quot & 0xff);
316 ark3116_write_reg(serial, UART_DLM, (quot>>8) & 0xff);
317
318 /* restore lcr */
319 ark3116_write_reg(serial, UART_LCR, lcr);
320 /* magic baudrate thingy: not sure what it does,
321 * but windows does this as well.
322 */
323 ark3116_write_reg(serial, 0xe, eval);
324
325 /* enable DMA */
326 ark3116_write_reg(serial, UART_FCR, UART_FCR_DMA_SELECT);
327 } else if (priv->lcr != lcr) {
328 priv->lcr = lcr;
329 ark3116_write_reg(serial, UART_LCR, lcr);
233 } 330 }
234 331
235 /* set stop bit (1/2) */ 332 mutex_unlock(&priv->hw_lock);
236 if (cflag & CSTOPB) {
237 config |= 0x04;
238 dbg("setting 2 stop bits");
239 } else {
240 dbg("setting 1 stop bit");
241 }
242 333
243 /* set baudrate */ 334 /* check for software flow control */
244 baud = tty_get_baud_rate(tty); 335 if (I_IXOFF(tty) || I_IXON(tty)) {
245 336 dev_warn(&serial->dev->dev,
246 switch (baud) { 337 "%s: don't know how to do software flow control\n",
247 case 75: 338 KBUILD_MODNAME);
248 case 150:
249 case 300:
250 case 600:
251 case 1200:
252 case 1800:
253 case 2400:
254 case 4800:
255 case 9600:
256 case 19200:
257 case 38400:
258 case 57600:
259 case 115200:
260 case 230400:
261 case 460800:
262 /* Report the resulting rate back to the caller */
263 tty_encode_baud_rate(tty, baud, baud);
264 break;
265 /* set 9600 as default (if given baudrate is invalid for example) */
266 default:
267 tty_encode_baud_rate(tty, 9600, 9600);
268 case 0:
269 baud = 9600;
270 } 339 }
271 340
272 /* 341 /* Don't rewrite B0 */
273 * found by try'n'error, be careful, maybe there are other options 342 if (tty_termios_baud_rate(termios))
274 * for multiplicator etc! (3.5 for example) 343 tty_termios_encode_baud_rate(termios, bps, bps);
275 */ 344}
276 if (baud == 460800)
277 /* strange, for 460800 the formula is wrong
278 * if using round() then 9600baud is wrong) */
279 ark3116_baud = 7;
280 else
281 ark3116_baud = 3000000 / baud;
282
283 /* ? */
284 ARK3116_RCV(serial, 0, 0xFE, 0xC0, 0x0000, 0x0003, 0x03, buf);
285
286 /* offset = buf[0]; */
287 /* offset = 0x03; */
288 /* dbg("using 0x%04X as target for 0x0003:", 0x0080 + offset); */
289
290 /* set baudrate */
291 dbg("setting baudrate to %d (->reg=%d)", baud, ark3116_baud);
292 ARK3116_SND(serial, 147, 0xFE, 0x40, 0x0083, 0x0003);
293 ARK3116_SND(serial, 148, 0xFE, 0x40,
294 (ark3116_baud & 0x00FF), 0x0000);
295 ARK3116_SND(serial, 149, 0xFE, 0x40,
296 (ark3116_baud & 0xFF00) >> 8, 0x0001);
297 ARK3116_SND(serial, 150, 0xFE, 0x40, 0x0003, 0x0003);
298
299 /* ? */
300 ARK3116_RCV(serial, 151, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
301 ARK3116_SND(serial, 152, 0xFE, 0x40, 0x0000, 0x0003);
302
303 /* set data bit count, stop bit count & parity: */
304 dbg("updating bit count, stop bit or parity (cfg=0x%02X)", config);
305 ARK3116_RCV(serial, 153, 0xFE, 0xC0, 0x0000, 0x0003, 0x00, buf);
306 ARK3116_SND(serial, 154, 0xFE, 0x40, config, 0x0003);
307 345
308 if (cflag & CRTSCTS) 346static void ark3116_close(struct usb_serial_port *port)
309 dbg("CRTSCTS not supported by chipset?!"); 347{
348 struct usb_serial *serial = port->serial;
310 349
311 /* TEST ARK3116_SND(154, 0xFE, 0x40, 0xFFFF, 0x0006); */ 350 if (serial->dev) {
351 /* disable DMA */
352 ark3116_write_reg(serial, UART_FCR, 0);
312 353
313 kfree(buf); 354 /* deactivate interrupts */
355 ark3116_write_reg(serial, UART_IER, 0);
314 356
315 return; 357 /* shutdown any bulk reads that might be going on */
358 if (serial->num_bulk_out)
359 usb_kill_urb(port->write_urb);
360 if (serial->num_bulk_in)
361 usb_kill_urb(port->read_urb);
362 if (serial->num_interrupt_in)
363 usb_kill_urb(port->interrupt_in_urb);
364 }
316} 365}
317 366
318static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port) 367static int ark3116_open(struct tty_struct *tty, struct usb_serial_port *port)
319{ 368{
320 struct ktermios tmp_termios; 369 struct ark3116_private *priv = usb_get_serial_port_data(port);
321 struct usb_serial *serial = port->serial; 370 struct usb_serial *serial = port->serial;
322 char *buf; 371 unsigned char *buf;
323 int result = 0; 372 int result;
324
325 dbg("%s - port %d", __func__, port->number);
326 373
327 buf = kmalloc(1, GFP_KERNEL); 374 buf = kmalloc(1, GFP_KERNEL);
328 if (!buf) { 375 if (buf == NULL)
329 dbg("error kmalloc -> out of mem?");
330 return -ENOMEM; 376 return -ENOMEM;
331 }
332 377
333 result = usb_serial_generic_open(tty, port); 378 result = usb_serial_generic_open(tty, port);
334 if (result) 379 if (result) {
380 dbg("%s - usb_serial_generic_open failed: %d",
381 __func__, result);
335 goto err_out; 382 goto err_out;
383 }
336 384
337 /* open */ 385 /* setup termios */
338 ARK3116_RCV(serial, 111, 0xFE, 0xC0, 0x0000, 0x0003, 0x02, buf); 386 if (tty)
339 387 ark3116_set_termios(tty, port, NULL);
340 ARK3116_SND(serial, 112, 0xFE, 0x40, 0x0082, 0x0003);
341 ARK3116_SND(serial, 113, 0xFE, 0x40, 0x001A, 0x0000);
342 ARK3116_SND(serial, 114, 0xFE, 0x40, 0x0000, 0x0001);
343 ARK3116_SND(serial, 115, 0xFE, 0x40, 0x0002, 0x0003);
344
345 ARK3116_RCV(serial, 116, 0xFE, 0xC0, 0x0000, 0x0004, 0x03, buf);
346 ARK3116_SND(serial, 117, 0xFE, 0x40, 0x0002, 0x0004);
347
348 ARK3116_RCV(serial, 118, 0xFE, 0xC0, 0x0000, 0x0004, 0x02, buf);
349 ARK3116_SND(serial, 119, 0xFE, 0x40, 0x0000, 0x0004);
350
351 ARK3116_RCV(serial, 120, 0xFE, 0xC0, 0x0000, 0x0004, 0x00, buf);
352 388
353 ARK3116_SND(serial, 121, 0xFE, 0x40, 0x0001, 0x0004); 389 /* remove any data still left: also clears error state */
390 ark3116_read_reg(serial, UART_RX, buf);
354 391
355 ARK3116_RCV(serial, 122, 0xFE, 0xC0, 0x0000, 0x0004, 0x01, buf); 392 /* read modem status */
393 priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
394 /* read line status */
395 priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
356 396
357 ARK3116_SND(serial, 123, 0xFE, 0x40, 0x0003, 0x0004); 397 result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
398 if (result) {
399 dev_err(&port->dev, "submit irq_in urb failed %d\n",
400 result);
401 ark3116_close(port);
402 goto err_out;
403 }
358 404
359 /* returns different values (control lines?!) */ 405 /* activate interrupts */
360 ARK3116_RCV(serial, 124, 0xFE, 0xC0, 0x0000, 0x0006, 0xFF, buf); 406 ark3116_write_reg(port->serial, UART_IER, UART_IER_MSI|UART_IER_RLSI);
361 407
362 /* initialise termios */ 408 /* enable DMA */
363 if (tty) 409 ark3116_write_reg(port->serial, UART_FCR, UART_FCR_DMA_SELECT);
364 ark3116_set_termios(tty, port, &tmp_termios);
365 410
366err_out: 411err_out:
367 kfree(buf); 412 kfree(buf);
368
369 return result; 413 return result;
370} 414}
371 415
@@ -373,6 +417,7 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
373 unsigned int cmd, unsigned long arg) 417 unsigned int cmd, unsigned long arg)
374{ 418{
375 struct usb_serial_port *port = tty->driver_data; 419 struct usb_serial_port *port = tty->driver_data;
420 struct ark3116_private *priv = usb_get_serial_port_data(port);
376 struct serial_struct serstruct; 421 struct serial_struct serstruct;
377 void __user *user_arg = (void __user *)arg; 422 void __user *user_arg = (void __user *)arg;
378 423
@@ -394,9 +439,48 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
394 if (copy_from_user(&serstruct, user_arg, sizeof(serstruct))) 439 if (copy_from_user(&serstruct, user_arg, sizeof(serstruct)))
395 return -EFAULT; 440 return -EFAULT;
396 return 0; 441 return 0;
397 default: 442 case TIOCMIWAIT:
398 dbg("%s cmd 0x%04x not supported", __func__, cmd); 443 for (;;) {
444 struct async_icount prev = priv->icount;
445 interruptible_sleep_on(&priv->delta_msr_wait);
446 /* see if a signal did it */
447 if (signal_pending(current))
448 return -ERESTARTSYS;
449 if ((prev.rng == priv->icount.rng) &&
450 (prev.dsr == priv->icount.dsr) &&
451 (prev.dcd == priv->icount.dcd) &&
452 (prev.cts == priv->icount.cts))
453 return -EIO;
454 if ((arg & TIOCM_RNG &&
455 (prev.rng != priv->icount.rng)) ||
456 (arg & TIOCM_DSR &&
457 (prev.dsr != priv->icount.dsr)) ||
458 (arg & TIOCM_CD &&
459 (prev.dcd != priv->icount.dcd)) ||
460 (arg & TIOCM_CTS &&
461 (prev.cts != priv->icount.cts)))
462 return 0;
463 }
399 break; 464 break;
465 case TIOCGICOUNT: {
466 struct serial_icounter_struct icount;
467 struct async_icount cnow = priv->icount;
468 memset(&icount, 0, sizeof(icount));
469 icount.cts = cnow.cts;
470 icount.dsr = cnow.dsr;
471 icount.rng = cnow.rng;
472 icount.dcd = cnow.dcd;
473 icount.rx = cnow.rx;
474 icount.tx = cnow.tx;
475 icount.frame = cnow.frame;
476 icount.overrun = cnow.overrun;
477 icount.parity = cnow.parity;
478 icount.brk = cnow.brk;
479 icount.buf_overrun = cnow.buf_overrun;
480 if (copy_to_user(user_arg, &icount, sizeof(icount)))
481 return -EFAULT;
482 return 0;
483 }
400 } 484 }
401 485
402 return -ENOIOCTLCMD; 486 return -ENOIOCTLCMD;
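
The TIOCMIWAIT and TIOCGICOUNT support added above is driven entirely from userspace. A small usage sketch, assuming a Linux toolchain where TIOCMIWAIT and the TIOCM_* bits are visible through <sys/ioctl.h>:

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* Block until CTS changes state on the given port. */
	static int wait_for_cts(const char *dev)
	{
		int ret, fd = open(dev, O_RDONLY | O_NOCTTY);

		if (fd < 0)
			return -1;
		ret = ioctl(fd, TIOCMIWAIT, TIOCM_CTS);
		close(fd);
		return ret;
	}
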
@@ -405,32 +489,273 @@ static int ark3116_ioctl(struct tty_struct *tty, struct file *file,
405static int ark3116_tiocmget(struct tty_struct *tty, struct file *file) 489static int ark3116_tiocmget(struct tty_struct *tty, struct file *file)
406{ 490{
407 struct usb_serial_port *port = tty->driver_data; 491 struct usb_serial_port *port = tty->driver_data;
408 struct usb_serial *serial = port->serial; 492 struct ark3116_private *priv = usb_get_serial_port_data(port);
409 char *buf; 493 __u32 status;
410 char temp; 494 __u32 ctrl;
495 unsigned long flags;
496
497 mutex_lock(&priv->hw_lock);
498 ctrl = priv->mcr;
499 mutex_unlock(&priv->hw_lock);
500
501 spin_lock_irqsave(&priv->status_lock, flags);
502 status = priv->msr;
503 spin_unlock_irqrestore(&priv->status_lock, flags);
504
505 return (status & UART_MSR_DSR ? TIOCM_DSR : 0) |
506 (status & UART_MSR_CTS ? TIOCM_CTS : 0) |
507 (status & UART_MSR_RI ? TIOCM_RI : 0) |
508 (status & UART_MSR_DCD ? TIOCM_CD : 0) |
509 (ctrl & UART_MCR_DTR ? TIOCM_DTR : 0) |
510 (ctrl & UART_MCR_RTS ? TIOCM_RTS : 0) |
511 (ctrl & UART_MCR_OUT1 ? TIOCM_OUT1 : 0) |
512 (ctrl & UART_MCR_OUT2 ? TIOCM_OUT2 : 0);
513}
411 514
412 /* seems like serial port status info (RTS, CTS, ...) is stored 515static int ark3116_tiocmset(struct tty_struct *tty, struct file *file,
413 * in reg(?) 0x0006 516 unsigned set, unsigned clr)
414 * pcb connection point 11 = GND -> sets bit4 of response 517{
415 * pcb connection point 7 = GND -> sets bit6 of response 518 struct usb_serial_port *port = tty->driver_data;
519 struct ark3116_private *priv = usb_get_serial_port_data(port);
520
521 /* we need to take the mutex here, to make sure that the value
522 * in priv->mcr is actually the one that is in the hardware
416 */ 523 */
417 524
418 buf = kmalloc(1, GFP_KERNEL); 525 mutex_lock(&priv->hw_lock);
419 if (!buf) { 526
420 dbg("error kmalloc"); 527 if (set & TIOCM_RTS)
421 return -ENOMEM; 528 priv->mcr |= UART_MCR_RTS;
529 if (set & TIOCM_DTR)
530 priv->mcr |= UART_MCR_DTR;
531 if (set & TIOCM_OUT1)
532 priv->mcr |= UART_MCR_OUT1;
533 if (set & TIOCM_OUT2)
534 priv->mcr |= UART_MCR_OUT2;
535 if (clr & TIOCM_RTS)
536 priv->mcr &= ~UART_MCR_RTS;
537 if (clr & TIOCM_DTR)
538 priv->mcr &= ~UART_MCR_DTR;
539 if (clr & TIOCM_OUT1)
540 priv->mcr &= ~UART_MCR_OUT1;
541 if (clr & TIOCM_OUT2)
542 priv->mcr &= ~UART_MCR_OUT2;
543
544 ark3116_write_reg(port->serial, UART_MCR, priv->mcr);
545
546 mutex_unlock(&priv->hw_lock);
547
548 return 0;
549}
550
551static void ark3116_break_ctl(struct tty_struct *tty, int break_state)
552{
553 struct usb_serial_port *port = tty->driver_data;
554 struct ark3116_private *priv = usb_get_serial_port_data(port);
555
556 /* LCR is also used for other things: protect access */
557 mutex_lock(&priv->hw_lock);
558
559 if (break_state)
560 priv->lcr |= UART_LCR_SBC;
561 else
562 priv->lcr &= ~UART_LCR_SBC;
563
564 ark3116_write_reg(port->serial, UART_LCR, priv->lcr);
565
566 mutex_unlock(&priv->hw_lock);
567}
568
569static void ark3116_update_msr(struct usb_serial_port *port, __u8 msr)
570{
571 struct ark3116_private *priv = usb_get_serial_port_data(port);
572 unsigned long flags;
573
574 spin_lock_irqsave(&priv->status_lock, flags);
575 priv->msr = msr;
576 spin_unlock_irqrestore(&priv->status_lock, flags);
577
578 if (msr & UART_MSR_ANY_DELTA) {
579 /* update input line counters */
580 if (msr & UART_MSR_DCTS)
581 priv->icount.cts++;
582 if (msr & UART_MSR_DDSR)
583 priv->icount.dsr++;
584 if (msr & UART_MSR_DDCD)
585 priv->icount.dcd++;
586 if (msr & UART_MSR_TERI)
587 priv->icount.rng++;
588 wake_up_interruptible(&priv->delta_msr_wait);
422 } 589 }
590}
423 591
424 /* read register */ 592static void ark3116_update_lsr(struct usb_serial_port *port, __u8 lsr)
425 ARK3116_RCV_QUIET(serial, 0xFE, 0xC0, 0x0000, 0x0006, buf); 593{
426 temp = buf[0]; 594 struct ark3116_private *priv = usb_get_serial_port_data(port);
427 kfree(buf); 595 unsigned long flags;
596
597 spin_lock_irqsave(&priv->status_lock, flags);
598 /* combine bits */
599 priv->lsr |= lsr;
600 spin_unlock_irqrestore(&priv->status_lock, flags);
601
602 if (lsr&UART_LSR_BRK_ERROR_BITS) {
603 if (lsr & UART_LSR_BI)
604 priv->icount.brk++;
605 if (lsr & UART_LSR_FE)
606 priv->icount.frame++;
607 if (lsr & UART_LSR_PE)
608 priv->icount.parity++;
609 if (lsr & UART_LSR_OE)
610 priv->icount.overrun++;
611 }
612}
428 613
429 /* i do not really know if bit4=CTS and bit6=DSR... just a 614static void ark3116_read_int_callback(struct urb *urb)
430 * quick guess! 615{
431 */ 616 struct usb_serial_port *port = urb->context;
432 return (temp & (1<<4) ? TIOCM_CTS : 0) 617 int status = urb->status;
433 | (temp & (1<<6) ? TIOCM_DSR : 0); 618 const __u8 *data = urb->transfer_buffer;
619 int result;
620
621 switch (status) {
622 case -ECONNRESET:
623 case -ENOENT:
624 case -ESHUTDOWN:
625 /* this urb is terminated, clean up */
626 dbg("%s - urb shutting down with status: %d",
627 __func__, status);
628 return;
629 default:
630 dbg("%s - nonzero urb status received: %d",
631 __func__, status);
632 break;
633 case 0: /* success */
 634 /* discovered this by trial and error... */
635 if ((urb->actual_length == 4) && (data[0] == 0xe8)) {
636 const __u8 id = data[1]&UART_IIR_ID;
637 dbg("%s: iir=%02x", __func__, data[1]);
638 if (id == UART_IIR_MSI) {
639 dbg("%s: msr=%02x", __func__, data[3]);
640 ark3116_update_msr(port, data[3]);
641 break;
642 } else if (id == UART_IIR_RLSI) {
643 dbg("%s: lsr=%02x", __func__, data[2]);
644 ark3116_update_lsr(port, data[2]);
645 break;
646 }
647 }
648 /*
649 * Not sure what this data meant...
650 */
651 usb_serial_debug_data(debug, &port->dev,
652 __func__,
653 urb->actual_length,
654 urb->transfer_buffer);
655 break;
656 }
657
658 result = usb_submit_urb(urb, GFP_ATOMIC);
659 if (result)
660 dev_err(&urb->dev->dev,
661 "%s - Error %d submitting interrupt urb\n",
662 __func__, result);
663}
664
665
666/* Data comes in via the bulk (data) URB, errors/interrupts via the int URB.
667 * This means that we cannot be sure which data byte has an associated error
668 * condition, so we report an error for all data in the next bulk read.
669 *
670 * Actually, there might even be a window between the bulk data leaving the
671 * ark and reading/resetting the lsr in the read_bulk_callback where an
672 * interrupt for the next data block could come in.
673 * Without some kind of ordering on the ark, we would have to report the
674 * error for the next block of data as well...
675 * For now, let's pretend this can't happen.
676 */
677
678static void send_to_tty(struct tty_struct *tty,
679 const unsigned char *chars,
680 size_t size, char flag)
681{
682 if (size == 0)
683 return;
684 if (flag == TTY_NORMAL) {
685 tty_insert_flip_string(tty, chars, size);
686 } else {
687 int i;
688 for (i = 0; i < size; ++i)
689 tty_insert_flip_char(tty, chars[i], flag);
690 }
691}
692
693static void ark3116_read_bulk_callback(struct urb *urb)
694{
695 struct usb_serial_port *port = urb->context;
696 struct ark3116_private *priv = usb_get_serial_port_data(port);
697 const __u8 *data = urb->transfer_buffer;
698 int status = urb->status;
699 struct tty_struct *tty;
700 unsigned long flags;
701 int result;
702 char flag;
703 __u32 lsr;
704
705 switch (status) {
706 case -ECONNRESET:
707 case -ENOENT:
708 case -ESHUTDOWN:
709 /* this urb is terminated, clean up */
710 dbg("%s - urb shutting down with status: %d",
711 __func__, status);
712 return;
713 default:
714 dbg("%s - nonzero urb status received: %d",
715 __func__, status);
716 break;
717 case 0: /* success */
718
719 spin_lock_irqsave(&priv->status_lock, flags);
720 lsr = priv->lsr;
721 /* clear error bits */
722 priv->lsr &= ~UART_LSR_BRK_ERROR_BITS;
723 spin_unlock_irqrestore(&priv->status_lock, flags);
724
725 if (unlikely(lsr & UART_LSR_BI))
726 flag = TTY_BREAK;
727 else if (unlikely(lsr & UART_LSR_PE))
728 flag = TTY_PARITY;
729 else if (unlikely(lsr & UART_LSR_FE))
730 flag = TTY_FRAME;
731 else
732 flag = TTY_NORMAL;
733
734 tty = tty_port_tty_get(&port->port);
735 if (tty) {
736 tty_buffer_request_room(tty, urb->actual_length + 1);
737 /* overrun is special, not associated with a char */
738 if (unlikely(lsr & UART_LSR_OE))
739 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
740 send_to_tty(tty, data, urb->actual_length, flag);
741 tty_flip_buffer_push(tty);
742 tty_kref_put(tty);
743 }
744
745 /* Throttle the device if requested by tty */
746 spin_lock_irqsave(&port->lock, flags);
747 port->throttled = port->throttle_req;
748 if (port->throttled) {
749 spin_unlock_irqrestore(&port->lock, flags);
750 return;
751 } else
752 spin_unlock_irqrestore(&port->lock, flags);
753 }
754 /* Continue reading from device */
755 result = usb_submit_urb(urb, GFP_ATOMIC);
756 if (result)
757 dev_err(&urb->dev->dev, "%s - failed resubmitting"
758 " read urb, error %d\n", __func__, result);
434} 759}
435 760
436static struct usb_driver ark3116_driver = { 761static struct usb_driver ark3116_driver = {
@@ -450,11 +775,17 @@ static struct usb_serial_driver ark3116_device = {
450 .usb_driver = &ark3116_driver, 775 .usb_driver = &ark3116_driver,
451 .num_ports = 1, 776 .num_ports = 1,
452 .attach = ark3116_attach, 777 .attach = ark3116_attach,
778 .release = ark3116_release,
453 .set_termios = ark3116_set_termios, 779 .set_termios = ark3116_set_termios,
454 .init_termios = ark3116_init_termios, 780 .init_termios = ark3116_init_termios,
455 .ioctl = ark3116_ioctl, 781 .ioctl = ark3116_ioctl,
456 .tiocmget = ark3116_tiocmget, 782 .tiocmget = ark3116_tiocmget,
783 .tiocmset = ark3116_tiocmset,
457 .open = ark3116_open, 784 .open = ark3116_open,
785 .close = ark3116_close,
786 .break_ctl = ark3116_break_ctl,
787 .read_int_callback = ark3116_read_int_callback,
788 .read_bulk_callback = ark3116_read_bulk_callback,
458}; 789};
459 790
460static int __init ark3116_init(void) 791static int __init ark3116_init(void)
@@ -465,7 +796,12 @@ static int __init ark3116_init(void)
465 if (retval) 796 if (retval)
466 return retval; 797 return retval;
467 retval = usb_register(&ark3116_driver); 798 retval = usb_register(&ark3116_driver);
468 if (retval) 799 if (retval == 0) {
800 printk(KERN_INFO "%s:"
801 DRIVER_VERSION ":"
802 DRIVER_DESC "\n",
803 KBUILD_MODNAME);
804 } else
469 usb_serial_deregister(&ark3116_device); 805 usb_serial_deregister(&ark3116_device);
470 return retval; 806 return retval;
471} 807}
@@ -480,6 +816,109 @@ module_init(ark3116_init);
480module_exit(ark3116_exit); 816module_exit(ark3116_exit);
481MODULE_LICENSE("GPL"); 817MODULE_LICENSE("GPL");
482 818
819MODULE_AUTHOR(DRIVER_AUTHOR);
820MODULE_DESCRIPTION(DRIVER_DESC);
821
483module_param(debug, bool, S_IRUGO | S_IWUSR); 822module_param(debug, bool, S_IRUGO | S_IWUSR);
484MODULE_PARM_DESC(debug, "Debug enabled or not"); 823MODULE_PARM_DESC(debug, "Enable debug");
485 824
825/*
826 * The following describes what I learned from studying the old
827 * ark3116.c driver, disassembling the windows driver, and some lucky
828 * guesses. Since I do not have any datasheet or other
829 * documentation, inaccuracies are almost guaranteed.
830 *
831 * Some specs for the ARK3116 can be found here:
832 * http://web.archive.org/web/20060318000438/
833 * www.arkmicro.com/en/products/view.php?id=10
834 * On that page, 2 GPIO pins are mentioned: I assume these are the
835 * OUT1 and OUT2 pins of the UART, so I added support for those
836 * through the MCR. Since the pins are not available on my hardware,
837 * I could not verify this.
838 * Also, it states there is "on-chip hardware flow control". I have
839 * discovered how to enable that. Unfortunately, I do not know how to
840 * enable XON/XOFF (software) flow control, which would need support
841 * from the chip as well to work. Because of the wording on the web
842 * page there is a real possibility the chip simply does not support
843 * software flow control.
844 *
845 * I got my ark3116 as part of a mobile phone adapter cable. On the
846 * PCB, the following numbered contacts are present:
847 *
848 * 1:- +5V
849 * 2:o DTR
850 * 3:i RX
851 * 4:i DCD
852 * 5:o RTS
853 * 6:o TX
854 * 7:i RI
855 * 8:i DSR
856 * 10:- 0V
857 * 11:i CTS
858 *
859 * On my chip, all signals seem to be 3.3V, but 5V tolerant. But that
860 * may be different for the one you have ;-).
861 *
862 * The windows driver limits the registers to 0-F, so I assume there
863 * are actually 16 present on the device.
864 *
865 * On a UART interrupt, 4 bytes of data come in on the interrupt
866 * endpoint. The bytes are 0xe8 IIR LSR MSR.
867 *
868 * The baudrate seems to be generated from the 12MHz crystal, using
869 * 4-times subsampling. So quot=12e6/(4*baud). Also see description
870 * of register E.
871 *
872 * Registers 0-7:
873 * These seem to be the same as for a regular 16450. The FCR is set
874 * to UART_FCR_DMA_SELECT (0x8), I guess to enable transfers between
875 * the UART and the USB bridge/DMA engine.
876 *
877 * Register 8:
878 * By trial and error, I found out that bit 0 enables hardware CTS,
879 * stopping TX when CTS is +5V. Bit 1 does the same for RTS, making
880 * RTS +5V when the 3116 cannot transfer the data to the USB bus
881 * (verified by disabling the reading URB). Note that as far as I can
882 * tell, the windows driver does NOT use this, so there might be some
883 * hardware bug or something.
884 *
885 * According to a patch provided here
886 * (http://lkml.org/lkml/2009/7/26/56), the ARK3116 can also be used
887 * as an IrDA dongle. Since I do not have such a thing, I could not
888 * investigate that aspect. However, I can speculate ;-).
889 *
890 * - IrDA encodes data differently than RS232. Most likely, one of
891 * the bits in registers 9..E enables the IR ENDEC (encoder/decoder).
892 * - Depending on the IR transceiver, the input and output need to be
893 * inverted, so there are probably bits for that as well.
894 * - IrDA is half-duplex, so there should be a bit for selecting that.
895 *
896 * This still leaves at least two registers unaccounted for. Perhaps
897 * the chip can do XON/XOFF or CRC in HW?
898 *
899 * Register 9:
900 * Set to 0x00 for IrDA, when the baudrate is initialised.
901 *
902 * Register A:
903 * Set to 0x01 for IrDA, at init.
904 *
905 * Register B:
906 * Set to 0x01 for IrDA, 0x00 for RS232, at init.
907 *
908 * Register C:
909 * Set to 0x00 for IrDA, at init.
910 *
911 * Register D:
912 * Set to 0x41 for IrDA, at init.
913 *
914 * Register E:
915 * Some kind of baudrate override. The windows driver seems to set
916 * this to 0x00 for normal baudrates, 0x01 for 460800, 0x02 for 921600.
917 * Since 460800 and 921600 cannot be obtained by dividing 3MHz by an integer,
918 * it could be somekind of subdivisor thingy.
919 * However,it does not seem to do anything: selecting 921600 (divisor 3,
920 * reg E=2), still gets 1 MHz. I also checked if registers 9, C or F would
921 * work, but they don't.
922 *
923 * Register F: unknown
924 */
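
As a sanity check on the baudrate description above: quot = (12000000 + 2*bps) / (4*bps) is simply 12 MHz / (4*bps) rounded to the nearest integer. A standalone userspace sketch of the same arithmetic (not driver code):

	#include <stdio.h>

	static int calc_divisor(int bps)
	{
		return (12000000 + 2 * bps) / (4 * bps);
	}

	int main(void)
	{
		/* 9600 -> 313 (312.5 rounds up), 115200 -> 26, and
		 * 460800 -> 7, matching the old driver's special case. */
		printf("%d %d %d\n", calc_divisor(9600),
		       calc_divisor(115200), calc_divisor(460800));
		return 0;
	}
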
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ebcc6d0e2e91..f99498fca99a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -598,6 +598,20 @@ static struct usb_device_id id_table_combined [] = {
598 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) }, 598 { USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
599 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) }, 599 { USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
600 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) }, 600 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
601 { USB_DEVICE(BANDB_VID, BANDB_USOPTL4_PID) },
602 { USB_DEVICE(BANDB_VID, BANDB_USPTL4_PID) },
603 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_2_PID) },
604 { USB_DEVICE(BANDB_VID, BANDB_USO9ML2DR_PID) },
605 { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR2_PID) },
606 { USB_DEVICE(BANDB_VID, BANDB_USOPTL4DR_PID) },
607 { USB_DEVICE(BANDB_VID, BANDB_485USB9F_2W_PID) },
608 { USB_DEVICE(BANDB_VID, BANDB_485USB9F_4W_PID) },
609 { USB_DEVICE(BANDB_VID, BANDB_232USB9M_PID) },
610 { USB_DEVICE(BANDB_VID, BANDB_485USBTB_2W_PID) },
611 { USB_DEVICE(BANDB_VID, BANDB_485USBTB_4W_PID) },
612 { USB_DEVICE(BANDB_VID, BANDB_TTL5USB9M_PID) },
613 { USB_DEVICE(BANDB_VID, BANDB_TTL3USB9M_PID) },
614 { USB_DEVICE(BANDB_VID, BANDB_ZZ_PROG1_USB_PID) },
601 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, 615 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
602 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, 616 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
603 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, 617 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
@@ -2195,15 +2209,21 @@ static void ftdi_set_termios(struct tty_struct *tty,
2195 2209
2196 /* Set number of data bits, parity, stop bits */ 2210 /* Set number of data bits, parity, stop bits */
2197 2211
2198 termios->c_cflag &= ~CMSPAR;
2199
2200 urb_value = 0; 2212 urb_value = 0;
2201 urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 : 2213 urb_value |= (cflag & CSTOPB ? FTDI_SIO_SET_DATA_STOP_BITS_2 :
2202 FTDI_SIO_SET_DATA_STOP_BITS_1); 2214 FTDI_SIO_SET_DATA_STOP_BITS_1);
2203 urb_value |= (cflag & PARENB ? 2215 if (cflag & PARENB) {
2204 (cflag & PARODD ? FTDI_SIO_SET_DATA_PARITY_ODD : 2216 if (cflag & CMSPAR)
2205 FTDI_SIO_SET_DATA_PARITY_EVEN) : 2217 urb_value |= cflag & PARODD ?
2206 FTDI_SIO_SET_DATA_PARITY_NONE); 2218 FTDI_SIO_SET_DATA_PARITY_MARK :
2219 FTDI_SIO_SET_DATA_PARITY_SPACE;
2220 else
2221 urb_value |= cflag & PARODD ?
2222 FTDI_SIO_SET_DATA_PARITY_ODD :
2223 FTDI_SIO_SET_DATA_PARITY_EVEN;
2224 } else {
2225 urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
2226 }
2207 if (cflag & CSIZE) { 2227 if (cflag & CSIZE) {
2208 switch (cflag & CSIZE) { 2228 switch (cflag & CSIZE) {
2209 case CS5: urb_value |= 5; dbg("Setting CS5"); break; 2229 case CS5: urb_value |= 5; dbg("Setting CS5"); break;
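
For reference, the parity mapping the ftdi_sio hunk above implements (the CMSPAR branches are the new part; previously CMSPAR was simply cleared):

	PARENB                   -> FTDI_SIO_SET_DATA_PARITY_EVEN
	PARENB | PARODD          -> FTDI_SIO_SET_DATA_PARITY_ODD
	PARENB | CMSPAR          -> FTDI_SIO_SET_DATA_PARITY_SPACE
	PARENB | CMSPAR | PARODD -> FTDI_SIO_SET_DATA_PARITY_MARK
	(no PARENB)              -> FTDI_SIO_SET_DATA_PARITY_NONE
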
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 6f31e0d71898..4586a24fafb0 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -662,6 +662,20 @@
662#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */ 662#define BANDB_USOTL4_PID 0xAC01 /* USOTL4 Isolated RS-485 Converter */
663#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */ 663#define BANDB_USTL4_PID 0xAC02 /* USTL4 RS-485 Converter */
664#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */ 664#define BANDB_USO9ML2_PID 0xAC03 /* USO9ML2 Isolated RS-232 Converter */
665#define BANDB_USOPTL4_PID 0xAC11
666#define BANDB_USPTL4_PID 0xAC12
667#define BANDB_USO9ML2DR_2_PID 0xAC16
668#define BANDB_USO9ML2DR_PID 0xAC17
669#define BANDB_USOPTL4DR2_PID 0xAC18 /* USOPTL4R-2 2-port Isolated RS-232 Converter */
670#define BANDB_USOPTL4DR_PID 0xAC19
671#define BANDB_485USB9F_2W_PID 0xAC25
672#define BANDB_485USB9F_4W_PID 0xAC26
673#define BANDB_232USB9M_PID 0xAC27
674#define BANDB_485USBTB_2W_PID 0xAC33
675#define BANDB_485USBTB_4W_PID 0xAC34
676#define BANDB_TTL5USB9M_PID 0xAC49
677#define BANDB_TTL3USB9M_PID 0xAC50
678#define BANDB_ZZ_PROG1_USB_PID 0xBA02
665 679
666/* 680/*
667 * RM Michaelides CANview USB (http://www.rmcan.com) 681 * RM Michaelides CANview USB (http://www.rmcan.com)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index f11abf52be7d..485fa9c5b107 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -121,8 +121,14 @@
121 * moschip_id_table_combined 121 * moschip_id_table_combined
122 */ 122 */
123#define USB_VENDOR_ID_BANDB 0x0856 123#define USB_VENDOR_ID_BANDB 0x0856
124#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44 124#define BANDB_DEVICE_ID_USO9ML2_2 0xAC22
125#define BANDB_DEVICE_ID_USO9ML2_4 0xAC24
126#define BANDB_DEVICE_ID_US9ML2_2 0xAC29
127#define BANDB_DEVICE_ID_US9ML2_4 0xAC30
128#define BANDB_DEVICE_ID_USPTL4_2 0xAC31
129#define BANDB_DEVICE_ID_USPTL4_4 0xAC32
125#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42 130#define BANDB_DEVICE_ID_USOPTL4_2 0xAC42
131#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
126 132
127/* This driver also supports 133/* This driver also supports
128 * ATEN UC2324 device using Moschip MCS7840 134 * ATEN UC2324 device using Moschip MCS7840
@@ -177,8 +183,14 @@
177static struct usb_device_id moschip_port_id_table[] = { 183static struct usb_device_id moschip_port_id_table[] = {
178 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 184 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
179 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 185 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
180 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, 186 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
187 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
188 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
189 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
190 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
191 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
181 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, 192 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
193 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
182 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, 194 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
183 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, 195 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
184 {} /* terminating entry */ 196 {} /* terminating entry */
@@ -187,8 +199,14 @@ static struct usb_device_id moschip_port_id_table[] = {
187static __devinitdata struct usb_device_id moschip_id_table_combined[] = { 199static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
188 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 200 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
189 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 201 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
190 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)}, 202 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
203 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_4)},
204 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_2)},
205 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_US9ML2_4)},
206 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_2)},
207 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USPTL4_4)},
191 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)}, 208 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2)},
209 {USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4)},
192 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)}, 210 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2324)},
193 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)}, 211 {USB_DEVICE(USB_VENDOR_ID_ATENINTL, ATENINTL_DEVICE_ID_UC2322)},
194 {} /* terminating entry */ 212 {} /* terminating entry */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 0577e4b61114..9a2b903492ec 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -580,12 +580,48 @@ static struct usb_device_id option_ids[] = {
580 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) }, 580 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
581 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, 581 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) },
582 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, 582 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
583 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff) },
584 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
585 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
586 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff) },
587 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
588 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) },
589 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) },
590 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
591 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) },
592 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) },
593 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) },
594 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) },
595 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
596 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) },
597 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
598 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
599 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
600 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
601 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
602 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
603 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
604 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
605 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
606 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
607 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
608 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
609 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
610 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
611 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
612 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
613 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
614 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
615 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
616 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
583 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ 617 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
584 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, 618 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
585 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, 619 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
586 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, 620 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
587 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, 621 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
588 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, 622 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
623 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
624 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
589 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 625 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
590 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 626 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
591 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 627 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -599,6 +635,7 @@ static struct usb_device_id option_ids[] = {
599 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, 635 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
600 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ 636 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
601 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 637 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
638 { USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
602 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 639 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
603 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, 640 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
604 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 641 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
@@ -1312,7 +1349,7 @@ static int option_suspend(struct usb_serial *serial, pm_message_t message)
1312 1349
1313 dbg("%s entered", __func__); 1350 dbg("%s entered", __func__);
1314 1351
1315 if (serial->dev->auto_pm) { 1352 if (message.event & PM_EVENT_AUTO) {
1316 spin_lock_irq(&intfdata->susp_lock); 1353 spin_lock_irq(&intfdata->susp_lock);
1317 b = intfdata->in_flight; 1354 b = intfdata->in_flight;
1318 spin_unlock_irq(&intfdata->susp_lock); 1355 spin_unlock_irq(&intfdata->susp_lock);
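
This hunk, and the matching one in sierra.c further down, replaces the old serial->dev->auto_pm test with a check of the pm_message_t argument: PM_EVENT_AUTO is set for runtime autosuspend requests but not for system sleep, so only autosuspend gets vetoed while I/O is pending. A minimal sketch of the pattern, with a hypothetical in-flight counter:

#include <linux/usb/serial.h>

static int example_in_flight;	/* hypothetical pending-I/O counter */

static int example_suspend(struct usb_serial *serial, pm_message_t message)
{
	if (message.event & PM_EVENT_AUTO) {
		if (example_in_flight)
			return -EBUSY;	/* veto runtime autosuspend */
	}
	return 0;			/* system sleep always proceeds */
}
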
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 5019325ba25d..ac1b6449fb6a 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -16,8 +16,9 @@
16 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de> 16 Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>
 17 Who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> 17 Who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org>
18*/ 18*/
19 19/* Uncomment to log function calls */
20#define DRIVER_VERSION "v.1.3.8" 20/* #define DEBUG */
21#define DRIVER_VERSION "v.1.7.16"
21#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer" 22#define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer"
22#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" 23#define DRIVER_DESC "USB Driver for Sierra Wireless USB modems"
23 24
@@ -33,8 +34,10 @@
33#define SWIMS_USB_REQUEST_SetPower 0x00 34#define SWIMS_USB_REQUEST_SetPower 0x00
34#define SWIMS_USB_REQUEST_SetNmea 0x07 35#define SWIMS_USB_REQUEST_SetNmea 0x07
35 36
36#define N_IN_URB 8 37#define N_IN_URB_HM 8
37#define N_OUT_URB 64 38#define N_OUT_URB_HM 64
39#define N_IN_URB 4
40#define N_OUT_URB 4
38#define IN_BUFLEN 4096 41#define IN_BUFLEN 4096
39 42
40#define MAX_TRANSFER (PAGE_SIZE - 512) 43#define MAX_TRANSFER (PAGE_SIZE - 512)
@@ -124,6 +127,23 @@ static int is_blacklisted(const u8 ifnum,
124 return 0; 127 return 0;
125} 128}
126 129
130static int is_himemory(const u8 ifnum,
131 const struct sierra_iface_info *himemorylist)
132{
133 const u8 *info;
134 int i;
135
136 if (himemorylist) {
137 info = himemorylist->ifaceinfo;
138
 139 for (i = 0; i < himemorylist->infolen; i++) {
140 if (info[i] == ifnum)
141 return 1;
142 }
143 }
144 return 0;
145}
146
127static int sierra_calc_interface(struct usb_serial *serial) 147static int sierra_calc_interface(struct usb_serial *serial)
128{ 148{
129 int interface; 149 int interface;
@@ -186,6 +206,20 @@ static int sierra_probe(struct usb_serial *serial,
186 return result; 206 return result;
187} 207}
188 208
209/* interfaces with higher memory requirements */
210static const u8 hi_memory_typeA_ifaces[] = { 0, 2 };
211static const struct sierra_iface_info typeA_interface_list = {
212 .infolen = ARRAY_SIZE(hi_memory_typeA_ifaces),
213 .ifaceinfo = hi_memory_typeA_ifaces,
214};
215
216static const u8 hi_memory_typeB_ifaces[] = { 3, 4, 5, 6 };
217static const struct sierra_iface_info typeB_interface_list = {
218 .infolen = ARRAY_SIZE(hi_memory_typeB_ifaces),
219 .ifaceinfo = hi_memory_typeB_ifaces,
220};
221
222/* 'blacklist' of interfaces not served by this driver */
189static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; 223static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 };
190static const struct sierra_iface_info direct_ip_interface_blacklist = { 224static const struct sierra_iface_info direct_ip_interface_blacklist = {
191 .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), 225 .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces),
@@ -286,8 +320,10 @@ struct sierra_port_private {
286 struct usb_anchor active; 320 struct usb_anchor active;
287 struct usb_anchor delayed; 321 struct usb_anchor delayed;
288 322
323 int num_out_urbs;
324 int num_in_urbs;
289 /* Input endpoints and buffers for this port */ 325 /* Input endpoints and buffers for this port */
290 struct urb *in_urbs[N_IN_URB]; 326 struct urb *in_urbs[N_IN_URB_HM];
291 327
292 /* Settings for the port */ 328 /* Settings for the port */
293 int rts_state; /* Handshaking pins (outputs) */ 329 int rts_state; /* Handshaking pins (outputs) */
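
Note the asymmetry introduced here: in_urbs[] stays statically sized for the larger N_IN_URB_HM case, while the new num_in_urbs/num_out_urbs fields record how many slots a given port actually uses, and every loop below is rebounded from ARRAY_SIZE() to the runtime count. The pattern in isolation (field names as in the driver, array size assumed):

#include <linux/usb.h>

struct example_port {
	int num_in_urbs;		/* N_IN_URB or N_IN_URB_HM */
	struct urb *in_urbs[8];		/* always sized for the worst case */
};

static void example_stop_rx(struct example_port *p)
{
	int i;

	for (i = 0; i < p->num_in_urbs; i++)	/* not ARRAY_SIZE() */
		usb_kill_urb(p->in_urbs[i]);
}
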
@@ -460,7 +496,7 @@ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
460 spin_lock_irqsave(&portdata->lock, flags); 496 spin_lock_irqsave(&portdata->lock, flags);
461 dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__, 497 dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__,
462 portdata->outstanding_urbs); 498 portdata->outstanding_urbs);
463 if (portdata->outstanding_urbs > N_OUT_URB) { 499 if (portdata->outstanding_urbs > portdata->num_out_urbs) {
464 spin_unlock_irqrestore(&portdata->lock, flags); 500 spin_unlock_irqrestore(&portdata->lock, flags);
465 dev_dbg(&port->dev, "%s - write limit hit\n", __func__); 501 dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
466 return 0; 502 return 0;
@@ -665,7 +701,7 @@ static int sierra_write_room(struct tty_struct *tty)
665 /* try to give a good number back based on if we have any free urbs at 701 /* try to give a good number back based on if we have any free urbs at
666 * this point in time */ 702 * this point in time */
667 spin_lock_irqsave(&portdata->lock, flags); 703 spin_lock_irqsave(&portdata->lock, flags);
668 if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) { 704 if (portdata->outstanding_urbs > (portdata->num_out_urbs * 2) / 3) {
669 spin_unlock_irqrestore(&portdata->lock, flags); 705 spin_unlock_irqrestore(&portdata->lock, flags);
670 dev_dbg(&port->dev, "%s - write limit hit\n", __func__); 706 dev_dbg(&port->dev, "%s - write limit hit\n", __func__);
671 return 0; 707 return 0;
@@ -680,7 +716,7 @@ static void sierra_stop_rx_urbs(struct usb_serial_port *port)
680 int i; 716 int i;
681 struct sierra_port_private *portdata = usb_get_serial_port_data(port); 717 struct sierra_port_private *portdata = usb_get_serial_port_data(port);
682 718
683 for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) 719 for (i = 0; i < portdata->num_in_urbs; i++)
684 usb_kill_urb(portdata->in_urbs[i]); 720 usb_kill_urb(portdata->in_urbs[i]);
685 721
686 usb_kill_urb(port->interrupt_in_urb); 722 usb_kill_urb(port->interrupt_in_urb);
@@ -695,7 +731,7 @@ static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags)
695 struct sierra_port_private *portdata = usb_get_serial_port_data(port); 731 struct sierra_port_private *portdata = usb_get_serial_port_data(port);
696 732
697 ok_cnt = 0; 733 ok_cnt = 0;
698 for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) { 734 for (i = 0; i < portdata->num_in_urbs; i++) {
699 urb = portdata->in_urbs[i]; 735 urb = portdata->in_urbs[i];
700 if (!urb) 736 if (!urb)
701 continue; 737 continue;
@@ -791,7 +827,7 @@ static void sierra_close(struct usb_serial_port *port)
791 /* Stop reading urbs */ 827 /* Stop reading urbs */
792 sierra_stop_rx_urbs(port); 828 sierra_stop_rx_urbs(port);
793 /* .. and release them */ 829 /* .. and release them */
794 for (i = 0; i < N_IN_URB; i++) { 830 for (i = 0; i < portdata->num_in_urbs; i++) {
795 sierra_release_urb(portdata->in_urbs[i]); 831 sierra_release_urb(portdata->in_urbs[i]);
796 portdata->in_urbs[i] = NULL; 832 portdata->in_urbs[i] = NULL;
797 } 833 }
@@ -818,7 +854,7 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
818 854
819 855
820 endpoint = port->bulk_in_endpointAddress; 856 endpoint = port->bulk_in_endpointAddress;
821 for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) { 857 for (i = 0; i < portdata->num_in_urbs; i++) {
822 urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port, 858 urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port,
823 IN_BUFLEN, GFP_KERNEL, 859 IN_BUFLEN, GFP_KERNEL,
824 sierra_indat_callback); 860 sierra_indat_callback);
@@ -869,7 +905,9 @@ static int sierra_startup(struct usb_serial *serial)
869{ 905{
870 struct usb_serial_port *port; 906 struct usb_serial_port *port;
871 struct sierra_port_private *portdata; 907 struct sierra_port_private *portdata;
908 struct sierra_iface_info *himemoryp = NULL;
872 int i; 909 int i;
910 u8 ifnum;
873 911
874 dev_dbg(&serial->dev->dev, "%s\n", __func__); 912 dev_dbg(&serial->dev->dev, "%s\n", __func__);
875 913
@@ -886,13 +924,40 @@ static int sierra_startup(struct usb_serial *serial)
886 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); 924 portdata = kzalloc(sizeof(*portdata), GFP_KERNEL);
887 if (!portdata) { 925 if (!portdata) {
888 dev_dbg(&port->dev, "%s: kmalloc for " 926 dev_dbg(&port->dev, "%s: kmalloc for "
889 "sierra_port_private (%d) failed!.\n", 927 "sierra_port_private (%d) failed!\n",
890 __func__, i); 928 __func__, i);
891 return -ENOMEM; 929 return -ENOMEM;
892 } 930 }
893 spin_lock_init(&portdata->lock); 931 spin_lock_init(&portdata->lock);
894 init_usb_anchor(&portdata->active); 932 init_usb_anchor(&portdata->active);
895 init_usb_anchor(&portdata->delayed); 933 init_usb_anchor(&portdata->delayed);
934 ifnum = i;
935 /* Assume low memory requirements */
936 portdata->num_out_urbs = N_OUT_URB;
937 portdata->num_in_urbs = N_IN_URB;
938
939 /* Determine actual memory requirements */
940 if (serial->num_ports == 1) {
941 /* Get interface number for composite device */
942 ifnum = sierra_calc_interface(serial);
943 himemoryp =
944 (struct sierra_iface_info *)&typeB_interface_list;
945 if (is_himemory(ifnum, himemoryp)) {
946 portdata->num_out_urbs = N_OUT_URB_HM;
947 portdata->num_in_urbs = N_IN_URB_HM;
948 }
949 }
950 else {
951 himemoryp =
952 (struct sierra_iface_info *)&typeA_interface_list;
953 if (is_himemory(i, himemoryp)) {
954 portdata->num_out_urbs = N_OUT_URB_HM;
955 portdata->num_in_urbs = N_IN_URB_HM;
956 }
957 }
958 dev_dbg(&serial->dev->dev,
959 "Memory usage (urbs) interface #%d, in=%d, out=%d\n",
 960 ifnum, portdata->num_in_urbs, portdata->num_out_urbs);
896 /* Set the port private data pointer */ 961 /* Set the port private data pointer */
897 usb_set_serial_port_data(port, portdata); 962 usb_set_serial_port_data(port, portdata);
898 } 963 }
@@ -940,7 +1005,7 @@ static int sierra_suspend(struct usb_serial *serial, pm_message_t message)
940 struct sierra_intf_private *intfdata; 1005 struct sierra_intf_private *intfdata;
941 int b; 1006 int b;
942 1007
943 if (serial->dev->auto_pm) { 1008 if (message.event & PM_EVENT_AUTO) {
944 intfdata = serial->private; 1009 intfdata = serial->private;
945 spin_lock_irq(&intfdata->susp_lock); 1010 spin_lock_irq(&intfdata->susp_lock);
946 b = intfdata->in_flight; 1011 b = intfdata->in_flight;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index cfa26d56ce60..e5e6df39e737 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -73,7 +73,8 @@
73 73
74static const char* host_info(struct Scsi_Host *host) 74static const char* host_info(struct Scsi_Host *host)
75{ 75{
76 return "SCSI emulation for USB Mass Storage devices"; 76 struct us_data *us = host_to_us(host);
77 return us->scsi_name;
77} 78}
78 79
79static int slave_alloc (struct scsi_device *sdev) 80static int slave_alloc (struct scsi_device *sdev)
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 589f6b4404f0..cc313d16d727 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -666,10 +666,11 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
666 * to wait for at least one CHECK_CONDITION to determine 666 * to wait for at least one CHECK_CONDITION to determine
667 * SANE_SENSE support 667 * SANE_SENSE support
668 */ 668 */
669 if ((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) && 669 if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
670 result == USB_STOR_TRANSPORT_GOOD && 670 result == USB_STOR_TRANSPORT_GOOD &&
671 !(us->fflags & US_FL_SANE_SENSE) && 671 !(us->fflags & US_FL_SANE_SENSE) &&
672 !(srb->cmnd[2] & 0x20)) { 672 !(us->fflags & US_FL_BAD_SENSE) &&
673 !(srb->cmnd[2] & 0x20))) {
673 US_DEBUGP("-- SAT supported, increasing auto-sense\n"); 674 US_DEBUGP("-- SAT supported, increasing auto-sense\n");
674 us->fflags |= US_FL_SANE_SENSE; 675 us->fflags |= US_FL_SANE_SENSE;
675 } 676 }
@@ -718,6 +719,12 @@ Retry_Sense:
718 if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) { 719 if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
719 US_DEBUGP("-- auto-sense aborted\n"); 720 US_DEBUGP("-- auto-sense aborted\n");
720 srb->result = DID_ABORT << 16; 721 srb->result = DID_ABORT << 16;
722
723 /* If SANE_SENSE caused this problem, disable it */
724 if (sense_size != US_SENSE_SIZE) {
725 us->fflags &= ~US_FL_SANE_SENSE;
726 us->fflags |= US_FL_BAD_SENSE;
727 }
721 goto Handle_Errors; 728 goto Handle_Errors;
722 } 729 }
723 730
@@ -727,10 +734,11 @@ Retry_Sense:
727 * (small) sense request. This fixes some USB GSM modems 734 * (small) sense request. This fixes some USB GSM modems
728 */ 735 */
729 if (temp_result == USB_STOR_TRANSPORT_FAILED && 736 if (temp_result == USB_STOR_TRANSPORT_FAILED &&
730 (us->fflags & US_FL_SANE_SENSE) && 737 sense_size != US_SENSE_SIZE) {
731 sense_size != US_SENSE_SIZE) {
732 US_DEBUGP("-- auto-sense failure, retry small sense\n"); 738 US_DEBUGP("-- auto-sense failure, retry small sense\n");
733 sense_size = US_SENSE_SIZE; 739 sense_size = US_SENSE_SIZE;
740 us->fflags &= ~US_FL_SANE_SENSE;
741 us->fflags |= US_FL_BAD_SENSE;
734 goto Retry_Sense; 742 goto Retry_Sense;
735 } 743 }
736 744
@@ -754,6 +762,7 @@ Retry_Sense:
754 */ 762 */
755 if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) && 763 if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
756 !(us->fflags & US_FL_SANE_SENSE) && 764 !(us->fflags & US_FL_SANE_SENSE) &&
765 !(us->fflags & US_FL_BAD_SENSE) &&
757 (srb->sense_buffer[0] & 0x7C) == 0x70) { 766 (srb->sense_buffer[0] & 0x7C) == 0x70) {
758 US_DEBUGP("-- SANE_SENSE support enabled\n"); 767 US_DEBUGP("-- SANE_SENSE support enabled\n");
759 us->fflags |= US_FL_SANE_SENSE; 768 us->fflags |= US_FL_SANE_SENSE;
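
Taken together, the transport.c hunks add a one-way latch: if a large (SANE_SENSE) auto-sense request is aborted or fails, US_FL_SANE_SENSE is cleared, US_FL_BAD_SENSE is set, and the request is retried at the standard size, never to be enlarged again for this device. Condensed control flow, with stand-in constants and a hypothetical issue_sense() in place of the real transport call:

/* stand-ins so the sketch is self-contained; real values live in the
 * usb-storage headers */
#define US_SENSE_SIZE		18
#define BIG_SENSE_SIZE		96
#define US_FL_SANE_SENSE	(1 << 0)
#define US_FL_BAD_SENSE		(1 << 1)

struct example_dev {
	unsigned long fflags;
};

static int issue_sense(struct example_dev *us, int size);	/* hypothetical */

static int example_auto_sense(struct example_dev *us)
{
	int sense_size = (us->fflags & US_FL_SANE_SENSE) ?
				BIG_SENSE_SIZE : US_SENSE_SIZE;
	int result;

retry:
	result = issue_sense(us, sense_size);
	if (result < 0 && sense_size != US_SENSE_SIZE) {
		us->fflags &= ~US_FL_SANE_SENSE;	/* one-way latch */
		us->fflags |= US_FL_BAD_SENSE;
		sense_size = US_SENSE_SIZE;
		goto retry;
	}
	return result;
}
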
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index d4f034ebaa8a..64a0a2c27e12 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -818,6 +818,13 @@ UNUSUAL_DEV( 0x066f, 0x8000, 0x0001, 0x0001,
818 US_SC_DEVICE, US_PR_DEVICE, NULL, 818 US_SC_DEVICE, US_PR_DEVICE, NULL,
819 US_FL_FIX_CAPACITY ), 819 US_FL_FIX_CAPACITY ),
820 820
821/* Reported by Daniel Kukula <daniel.kuku@gmail.com> */
822UNUSUAL_DEV( 0x067b, 0x1063, 0x0100, 0x0100,
823 "Prolific Technology, Inc.",
824 "Prolific Storage Gadget",
825 US_SC_DEVICE, US_PR_DEVICE, NULL,
826 US_FL_BAD_SENSE ),
827
821/* Reported by Rogerio Brito <rbrito@ime.usp.br> */ 828/* Reported by Rogerio Brito <rbrito@ime.usp.br> */
822UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001, 829UNUSUAL_DEV( 0x067b, 0x2317, 0x0001, 0x001,
823 "Prolific Technology, Inc.", 830 "Prolific Technology, Inc.",
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 8060b85fe1a3..5a53d4f0dd11 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -45,6 +45,10 @@
45 * 675 Mass Ave, Cambridge, MA 02139, USA. 45 * 675 Mass Ave, Cambridge, MA 02139, USA.
46 */ 46 */
47 47
48#ifdef CONFIG_USB_STORAGE_DEBUG
49#define DEBUG
50#endif
51
48#include <linux/sched.h> 52#include <linux/sched.h>
49#include <linux/errno.h> 53#include <linux/errno.h>
50#include <linux/freezer.h> 54#include <linux/freezer.h>
@@ -228,6 +232,7 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
228 if (data_len<36) // You lose. 232 if (data_len<36) // You lose.
229 return; 233 return;
230 234
235 memset(data+8, ' ', 28);
231 if(data[0]&0x20) { /* USB device currently not connected. Return 236 if(data[0]&0x20) { /* USB device currently not connected. Return
232 peripheral qualifier 001b ("...however, the 237 peripheral qualifier 001b ("...however, the
233 physical device is not currently connected 238 physical device is not currently connected
@@ -237,15 +242,15 @@ void fill_inquiry_response(struct us_data *us, unsigned char *data,
237 device, it may return zeros or ASCII spaces 242 device, it may return zeros or ASCII spaces
238 (20h) in those fields until the data is 243 (20h) in those fields until the data is
239 available from the device."). */ 244 available from the device."). */
240 memset(data+8,0,28);
241 } else { 245 } else {
242 u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice); 246 u16 bcdDevice = le16_to_cpu(us->pusb_dev->descriptor.bcdDevice);
243 memcpy(data+8, us->unusual_dev->vendorName, 247 int n;
244 strlen(us->unusual_dev->vendorName) > 8 ? 8 : 248
245 strlen(us->unusual_dev->vendorName)); 249 n = strlen(us->unusual_dev->vendorName);
246 memcpy(data+16, us->unusual_dev->productName, 250 memcpy(data+8, us->unusual_dev->vendorName, min(8, n));
247 strlen(us->unusual_dev->productName) > 16 ? 16 : 251 n = strlen(us->unusual_dev->productName);
248 strlen(us->unusual_dev->productName)); 252 memcpy(data+16, us->unusual_dev->productName, min(16, n));
253
249 data[32] = 0x30 + ((bcdDevice>>12) & 0x0F); 254 data[32] = 0x30 + ((bcdDevice>>12) & 0x0F);
250 data[33] = 0x30 + ((bcdDevice>>8) & 0x0F); 255 data[33] = 0x30 + ((bcdDevice>>8) & 0x0F);
251 data[34] = 0x30 + ((bcdDevice>>4) & 0x0F); 256 data[34] = 0x30 + ((bcdDevice>>4) & 0x0F);
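
The rewritten block space-fills all 28 identification bytes up front and then copies at most the field width of each string, replacing the old ternary-per-memcpy construction; min() keeps short strings from leaving stale bytes behind. A standalone version of the same idea:

#include <string.h>

/* data[8..35] hold vendor (8), product (16) and revision (4) */
static void fill_id_fields(unsigned char *data,
			   const char *vendor, const char *product)
{
	size_t n;

	memset(data + 8, ' ', 28);	/* SCSI INQUIRY wants space padding */
	n = strlen(vendor);
	memcpy(data + 8, vendor, n < 8 ? n : 8);
	n = strlen(product);
	memcpy(data + 16, product, n < 16 ? n : 16);
}
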
@@ -459,6 +464,9 @@ static void adjust_quirks(struct us_data *us)
459 case 'a': 464 case 'a':
460 f |= US_FL_SANE_SENSE; 465 f |= US_FL_SANE_SENSE;
461 break; 466 break;
467 case 'b':
468 f |= US_FL_BAD_SENSE;
469 break;
462 case 'c': 470 case 'c':
463 f |= US_FL_FIX_CAPACITY; 471 f |= US_FL_FIX_CAPACITY;
464 break; 472 break;
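
With the new 'b' letter wired into adjust_quirks(), the flag can also be forced without an unusual_devs.h entry, via the vid:pid:flags syntax this function parses; assuming the device added below, something like

	usb-storage.quirks=067b:1063:b

on the kernel command line (or quirks=067b:1063:b as a module parameter) would mark it as unable to handle large sense requests.
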
@@ -808,14 +816,13 @@ static int usb_stor_scan_thread(void * __us)
808{ 816{
809 struct us_data *us = (struct us_data *)__us; 817 struct us_data *us = (struct us_data *)__us;
810 818
811 printk(KERN_DEBUG 819 dev_dbg(&us->pusb_intf->dev, "device found\n");
812 "usb-storage: device found at %d\n", us->pusb_dev->devnum);
813 820
814 set_freezable(); 821 set_freezable();
815 /* Wait for the timeout to expire or for a disconnect */ 822 /* Wait for the timeout to expire or for a disconnect */
816 if (delay_use > 0) { 823 if (delay_use > 0) {
817 printk(KERN_DEBUG "usb-storage: waiting for device " 824 dev_dbg(&us->pusb_intf->dev, "waiting for device to settle "
818 "to settle before scanning\n"); 825 "before scanning\n");
819 wait_event_freezable_timeout(us->delay_wait, 826 wait_event_freezable_timeout(us->delay_wait,
820 test_bit(US_FLIDX_DONT_SCAN, &us->dflags), 827 test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
821 delay_use * HZ); 828 delay_use * HZ);
@@ -832,7 +839,7 @@ static int usb_stor_scan_thread(void * __us)
832 mutex_unlock(&us->dev_mutex); 839 mutex_unlock(&us->dev_mutex);
833 } 840 }
834 scsi_scan_host(us_to_host(us)); 841 scsi_scan_host(us_to_host(us));
835 printk(KERN_DEBUG "usb-storage: device scan complete\n"); 842 dev_dbg(&us->pusb_intf->dev, "scan complete\n");
836 843
837 /* Should we unbind if no devices were detected? */ 844 /* Should we unbind if no devices were detected? */
838 } 845 }
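
These printk(KERN_DEBUG ...) conversions pair with the CONFIG_USB_STORAGE_DEBUG block added at the top of the file: dev_dbg() prefixes each message with the driver and device name, and compiles away entirely unless DEBUG (or dynamic debug) is in effect. The knob in miniature, with a hypothetical config symbol:

#ifdef CONFIG_EXAMPLE_DEBUG
#define DEBUG			/* must precede the device.h include */
#endif
#include <linux/device.h>

static void example_announce(struct device *dev)
{
	/* prints "example 1-1.4: device found" with DEBUG; nothing without */
	dev_dbg(dev, "device found\n");
}
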
@@ -840,6 +847,15 @@ static int usb_stor_scan_thread(void * __us)
840 complete_and_exit(&us->scanning_done, 0); 847 complete_and_exit(&us->scanning_done, 0);
841} 848}
842 849
850static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
851{
852 struct usb_device *usb_dev = interface_to_usbdev(intf);
853
854 if (usb_dev->bus->sg_tablesize) {
855 return usb_dev->bus->sg_tablesize;
856 }
857 return SG_ALL;
858}
843 859
844/* First part of general USB mass-storage probing */ 860/* First part of general USB mass-storage probing */
845int usb_stor_probe1(struct us_data **pus, 861int usb_stor_probe1(struct us_data **pus,
@@ -868,6 +884,7 @@ int usb_stor_probe1(struct us_data **pus,
868 * Allow 16-byte CDBs and thus > 2TB 884 * Allow 16-byte CDBs and thus > 2TB
869 */ 885 */
870 host->max_cmd_len = 16; 886 host->max_cmd_len = 16;
887 host->sg_tablesize = usb_stor_sg_tablesize(intf);
871 *pus = us = host_to_us(host); 888 *pus = us = host_to_us(host);
872 memset(us, 0, sizeof(struct us_data)); 889 memset(us, 0, sizeof(struct us_data));
873 mutex_init(&(us->dev_mutex)); 890 mutex_init(&(us->dev_mutex));
@@ -929,6 +946,8 @@ int usb_stor_probe2(struct us_data *us)
929 result = usb_stor_acquire_resources(us); 946 result = usb_stor_acquire_resources(us);
930 if (result) 947 if (result)
931 goto BadDevice; 948 goto BadDevice;
949 snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s",
950 dev_name(&us->pusb_intf->dev));
932 result = scsi_add_host(us_to_host(us), &us->pusb_intf->dev); 951 result = scsi_add_host(us_to_host(us), &us->pusb_intf->dev);
933 if (result) { 952 if (result) {
934 printk(KERN_WARNING USB_STORAGE 953 printk(KERN_WARNING USB_STORAGE
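
The snprintf here builds the string that the scsiglue.c hunk above now returns from host_info(), giving each SCSI host a per-device name instead of a shared constant; the 32-byte buffer is declared in usb.h below. The formatting in isolation (the interface name "1-2:1.0" is an assumed example):

#include <stdio.h>

int main(void)
{
	char scsi_name[32];

	snprintf(scsi_name, sizeof(scsi_name), "usb-storage %s", "1-2:1.0");
	puts(scsi_name);	/* usb-storage 1-2:1.0 */
	return 0;
}
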
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 2609efb2bd7e..69717134231b 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -132,6 +132,7 @@ struct us_data {
132 /* SCSI interfaces */ 132 /* SCSI interfaces */
133 struct scsi_cmnd *srb; /* current srb */ 133 struct scsi_cmnd *srb; /* current srb */
134 unsigned int tag; /* current dCBWTag */ 134 unsigned int tag; /* current dCBWTag */
135 char scsi_name[32]; /* scsi_host name */
135 136
136 /* control and bulk communications data */ 137 /* control and bulk communications data */
137 struct urb *current_urb; /* USB requests */ 138 struct urb *current_urb; /* USB requests */
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b62f2bc064f6..b1e579c5c97c 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -358,7 +358,7 @@ retry:
358 rv = skel_do_read_io(dev, count); 358 rv = skel_do_read_io(dev, count);
359 if (rv < 0) 359 if (rv < 0)
360 goto exit; 360 goto exit;
361 else if (!file->f_flags & O_NONBLOCK) 361 else if (!(file->f_flags & O_NONBLOCK))
362 goto retry; 362 goto retry;
363 rv = -EAGAIN; 363 rv = -EAGAIN;
364 } 364 }
@@ -411,7 +411,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
411 * limit the number of URBs in flight to stop a user from using up all 411 * limit the number of URBs in flight to stop a user from using up all
412 * RAM 412 * RAM
413 */ 413 */
414 if (!file->f_flags & O_NONBLOCK) { 414 if (!(file->f_flags & O_NONBLOCK)) {
415 if (down_interruptible(&dev->limit_sem)) { 415 if (down_interruptible(&dev->limit_sem)) {
416 retval = -ERESTARTSYS; 416 retval = -ERESTARTSYS;
417 goto exit; 417 goto exit;
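
Both usb-skeleton hunks fix the same precedence slip: ! binds tighter than &, so !file->f_flags & O_NONBLOCK first collapses the flags word to 0 or 1 and then masks it with O_NONBLOCK, which shares no bit with 1, making the test constantly false. A userspace demonstration:

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int blocking = 0, nonblocking = O_NONBLOCK;

	/* unparenthesized: always 0, whatever the flags */
	printf("buggy: %d %d\n",
	       !blocking & O_NONBLOCK, !nonblocking & O_NONBLOCK);
	/* parenthesized: true exactly when O_NONBLOCK is clear */
	printf("fixed: %d %d\n",
	       !(blocking & O_NONBLOCK), !(nonblocking & O_NONBLOCK));
	return 0;
}
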
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index 4ac4300a3f9a..dced419f7aba 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -119,10 +119,12 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
119 urb = usb_alloc_urb(0, GFP_KERNEL); 119 urb = usb_alloc_urb(0, GFP_KERNEL);
120 if (urb == NULL) 120 if (urb == NULL)
121 goto err; 121 goto err;
122 wusb_dev->set_gtk_urb = urb;
122 123
123 req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); 124 req = kmalloc(sizeof(*req), GFP_KERNEL);
124 if (req == NULL) 125 if (req == NULL)
125 goto err; 126 goto err;
127 wusb_dev->set_gtk_req = req;
126 128
127 req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; 129 req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
128 req->bRequest = USB_REQ_SET_DESCRIPTOR; 130 req->bRequest = USB_REQ_SET_DESCRIPTOR;
@@ -130,9 +132,6 @@ static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc)
130 req->wIndex = 0; 132 req->wIndex = 0;
131 req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); 133 req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength);
132 134
133 wusb_dev->set_gtk_urb = urb;
134 wusb_dev->set_gtk_req = req;
135
136 return wusb_dev; 135 return wusb_dev;
137err: 136err:
138 wusb_dev_free(wusb_dev); 137 wusb_dev_free(wusb_dev);
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 4516c36436e6..edcd2d756037 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -205,15 +205,15 @@ int wusb_dev_sec_add(struct wusbhc *wusbhc,
205 const void *itr, *top; 205 const void *itr, *top;
206 char buf[64]; 206 char buf[64];
207 207
208 secd = kmalloc(sizeof(struct usb_security_descriptor), GFP_KERNEL); 208 secd = kmalloc(sizeof(*secd), GFP_KERNEL);
209 if (secd == NULL) { 209 if (secd == NULL) {
210 result = -ENOMEM; 210 result = -ENOMEM;
211 goto out; 211 goto out;
212 } 212 }
213 213
214 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 214 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
215 0, secd, sizeof(struct usb_security_descriptor)); 215 0, secd, sizeof(*secd));
216 if (result < sizeof(secd)) { 216 if (result < sizeof(*secd)) {
217 dev_err(dev, "Can't read security descriptor or " 217 dev_err(dev, "Can't read security descriptor or "
218 "not enough data: %d\n", result); 218 "not enough data: %d\n", result);
219 goto out; 219 goto out;
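
The last changed line of this hunk fixes a classic sizeof slip: secd is a pointer, so sizeof(secd) is 4 or 8 regardless of the descriptor's actual size, and the shortness check could pass on truncated data. The difference in isolation, with a stand-in struct:

#include <stdio.h>

struct usb_security_descriptor {
	unsigned char bytes[5];		/* stand-in for the real layout */
};

int main(void)
{
	struct usb_security_descriptor *secd = NULL;

	printf("sizeof(secd)  = %zu (the pointer)\n", sizeof(secd));
	printf("sizeof(*secd) = %zu (the descriptor)\n", sizeof(*secd));
	return 0;
}
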
diff --git a/drivers/usb/wusbcore/wusbhc.c b/drivers/usb/wusbcore/wusbhc.c
index ee6256f23636..eab86e4bc770 100644
--- a/drivers/usb/wusbcore/wusbhc.c
+++ b/drivers/usb/wusbcore/wusbhc.c
@@ -147,10 +147,40 @@ static ssize_t wusb_chid_store(struct device *dev,
147} 147}
148static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store); 148static DEVICE_ATTR(wusb_chid, 0644, wusb_chid_show, wusb_chid_store);
149 149
150
151static ssize_t wusb_phy_rate_show(struct device *dev,
152 struct device_attribute *attr,
153 char *buf)
154{
155 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
156
157 return sprintf(buf, "%d\n", wusbhc->phy_rate);
158}
159
160static ssize_t wusb_phy_rate_store(struct device *dev,
161 struct device_attribute *attr,
162 const char *buf, size_t size)
163{
164 struct wusbhc *wusbhc = usbhc_dev_to_wusbhc(dev);
165 uint8_t phy_rate;
166 ssize_t result;
167
168 result = sscanf(buf, "%hhu", &phy_rate);
169 if (result != 1)
170 return -EINVAL;
171 if (phy_rate >= UWB_PHY_RATE_INVALID)
172 return -EINVAL;
173
174 wusbhc->phy_rate = phy_rate;
175 return size;
176}
177static DEVICE_ATTR(wusb_phy_rate, 0644, wusb_phy_rate_show, wusb_phy_rate_store);
178
150/* Group all the WUSBHC attributes */ 179/* Group all the WUSBHC attributes */
151static struct attribute *wusbhc_attrs[] = { 180static struct attribute *wusbhc_attrs[] = {
152 &dev_attr_wusb_trust_timeout.attr, 181 &dev_attr_wusb_trust_timeout.attr,
153 &dev_attr_wusb_chid.attr, 182 &dev_attr_wusb_chid.attr,
183 &dev_attr_wusb_phy_rate.attr,
154 NULL, 184 NULL,
155}; 185};
156 186
@@ -177,6 +207,8 @@ int wusbhc_create(struct wusbhc *wusbhc)
177 int result = 0; 207 int result = 0;
178 208
179 wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS; 209 wusbhc->trust_timeout = WUSB_TRUST_TIMEOUT_MS;
210 wusbhc->phy_rate = UWB_PHY_RATE_INVALID - 1;
211
180 mutex_init(&wusbhc->mutex); 212 mutex_init(&wusbhc->mutex);
181 result = wusbhc_mmcie_create(wusbhc); 213 result = wusbhc_mmcie_create(wusbhc);
182 if (result < 0) 214 if (result < 0)
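
The new attribute pairs a show handler (sprintf of the current index) with a store handler that rejects anything at or above UWB_PHY_RATE_INVALID, and the second hunk initializes the default to the highest valid rate. From userspace it behaves like any sysfs knob; a sketch, assuming a path of the usual uwb_rc form:

#include <stdio.h>

int main(void)
{
	/* the exact path depends on how the host controller registers */
	FILE *f = fopen("/sys/class/uwb_rc/uwb0/wusbhc/wusb_phy_rate", "r+");
	unsigned rate;

	if (!f)
		return 1;
	if (fscanf(f, "%u", &rate) == 1)
		printf("current PHY rate index: %u\n", rate);
	rewind(f);
	fprintf(f, "7\n");	/* request a lower rate index */
	return fclose(f) ? 1 : 0;
}
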
diff --git a/drivers/usb/wusbcore/wusbhc.h b/drivers/usb/wusbcore/wusbhc.h
index 797c2453a35b..fd2fd4e277e1 100644
--- a/drivers/usb/wusbcore/wusbhc.h
+++ b/drivers/usb/wusbcore/wusbhc.h
@@ -253,6 +253,7 @@ struct wusbhc {
253 253
254 unsigned trust_timeout; /* in jiffies */ 254 unsigned trust_timeout; /* in jiffies */
255 struct wusb_ckhdid chid; 255 struct wusb_ckhdid chid;
256 uint8_t phy_rate;
256 struct wuie_host_info *wuie_host_info; 257 struct wuie_host_info *wuie_host_info;
257 258
258 struct mutex mutex; /* locks everything else */ 259 struct mutex mutex; /* locks everything else */
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index e9f193e6b27e..bb5fbed89e7f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2165,6 +2165,7 @@ config FB_BROADSHEET
2165 a bridge adapter. 2165 a bridge adapter.
2166 2166
2167source "drivers/video/omap/Kconfig" 2167source "drivers/video/omap/Kconfig"
2168source "drivers/video/omap2/Kconfig"
2168 2169
2169source "drivers/video/backlight/Kconfig" 2170source "drivers/video/backlight/Kconfig"
2170source "drivers/video/display/Kconfig" 2171source "drivers/video/display/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 80232e124889..0f8da331ba0f 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -124,6 +124,7 @@ obj-$(CONFIG_FB_SM501) += sm501fb.o
124obj-$(CONFIG_FB_XILINX) += xilinxfb.o 124obj-$(CONFIG_FB_XILINX) += xilinxfb.o
125obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o 125obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
126obj-$(CONFIG_FB_OMAP) += omap/ 126obj-$(CONFIG_FB_OMAP) += omap/
127obj-y += omap2/
127obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o 128obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
128obj-$(CONFIG_FB_CARMINE) += carminefb.o 129obj-$(CONFIG_FB_CARMINE) += carminefb.o
129obj-$(CONFIG_FB_MB862XX) += mb862xx/ 130obj-$(CONFIG_FB_MB862XX) += mb862xx/
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 551e3e9c4cbe..455c6055325d 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -1,6 +1,7 @@
1config FB_OMAP 1config FB_OMAP
2 tristate "OMAP frame buffer support (EXPERIMENTAL)" 2 tristate "OMAP frame buffer support (EXPERIMENTAL)"
3 depends on FB && ARCH_OMAP 3 depends on FB && ARCH_OMAP && (OMAP2_DSS = "n")
4
4 select FB_CFB_FILLRECT 5 select FB_CFB_FILLRECT
5 select FB_CFB_COPYAREA 6 select FB_CFB_COPYAREA
6 select FB_CFB_IMAGEBLIT 7 select FB_CFB_IMAGEBLIT
@@ -72,7 +73,7 @@ config FB_OMAP_LCD_MIPID
72 73
73config FB_OMAP_BOOTLOADER_INIT 74config FB_OMAP_BOOTLOADER_INIT
74 bool "Check bootloader initialization" 75 bool "Check bootloader initialization"
75 depends on FB_OMAP 76 depends on FB_OMAP || FB_OMAP2
76 help 77 help
77 Say Y here if you want to enable checking if the bootloader has 78 Say Y here if you want to enable checking if the bootloader has
78 already initialized the display controller. In this case the 79 already initialized the display controller. In this case the
diff --git a/drivers/video/omap/blizzard.c b/drivers/video/omap/blizzard.c
index f5d75f22cef9..2ffb34af4c59 100644
--- a/drivers/video/omap/blizzard.c
+++ b/drivers/video/omap/blizzard.c
@@ -27,9 +27,9 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28 28
29#include <plat/dma.h> 29#include <plat/dma.h>
30#include <plat/omapfb.h>
31#include <plat/blizzard.h> 30#include <plat/blizzard.h>
32 31
32#include "omapfb.h"
33#include "dispc.h" 33#include "dispc.h"
34 34
35#define MODULE_NAME "blizzard" 35#define MODULE_NAME "blizzard"
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 7c833db4f9b7..c7c6455f1fa8 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -24,11 +24,12 @@
24#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/platform_device.h>
27 28
28#include <plat/sram.h> 29#include <plat/sram.h>
29#include <plat/omapfb.h>
30#include <plat/board.h> 30#include <plat/board.h>
31 31
32#include "omapfb.h"
32#include "dispc.h" 33#include "dispc.h"
33 34
34#define MODULE_NAME "dispc" 35#define MODULE_NAME "dispc"
@@ -188,6 +189,11 @@ static struct {
188 struct omapfb_color_key color_key; 189 struct omapfb_color_key color_key;
189} dispc; 190} dispc;
190 191
192static struct platform_device omapdss_device = {
193 .name = "omapdss",
194 .id = -1,
195};
196
191static void enable_lcd_clocks(int enable); 197static void enable_lcd_clocks(int enable);
192 198
193static void inline dispc_write_reg(int idx, u32 val) 199static void inline dispc_write_reg(int idx, u32 val)
@@ -914,20 +920,20 @@ static irqreturn_t omap_dispc_irq_handler(int irq, void *dev)
914 920
915static int get_dss_clocks(void) 921static int get_dss_clocks(void)
916{ 922{
917 dispc.dss_ick = clk_get(dispc.fbdev->dev, "ick"); 923 dispc.dss_ick = clk_get(&omapdss_device.dev, "ick");
918 if (IS_ERR(dispc.dss_ick)) { 924 if (IS_ERR(dispc.dss_ick)) {
919 dev_err(dispc.fbdev->dev, "can't get ick\n"); 925 dev_err(dispc.fbdev->dev, "can't get ick\n");
920 return PTR_ERR(dispc.dss_ick); 926 return PTR_ERR(dispc.dss_ick);
921 } 927 }
922 928
923 dispc.dss1_fck = clk_get(dispc.fbdev->dev, "dss1_fck"); 929 dispc.dss1_fck = clk_get(&omapdss_device.dev, "dss1_fck");
924 if (IS_ERR(dispc.dss1_fck)) { 930 if (IS_ERR(dispc.dss1_fck)) {
925 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n"); 931 dev_err(dispc.fbdev->dev, "can't get dss1_fck\n");
926 clk_put(dispc.dss_ick); 932 clk_put(dispc.dss_ick);
927 return PTR_ERR(dispc.dss1_fck); 933 return PTR_ERR(dispc.dss1_fck);
928 } 934 }
929 935
930 dispc.dss_54m_fck = clk_get(dispc.fbdev->dev, "tv_fck"); 936 dispc.dss_54m_fck = clk_get(&omapdss_device.dev, "tv_fck");
931 if (IS_ERR(dispc.dss_54m_fck)) { 937 if (IS_ERR(dispc.dss_54m_fck)) {
932 dev_err(dispc.fbdev->dev, "can't get tv_fck\n"); 938 dev_err(dispc.fbdev->dev, "can't get tv_fck\n");
933 clk_put(dispc.dss_ick); 939 clk_put(dispc.dss_ick);
@@ -1379,6 +1385,12 @@ static int omap_dispc_init(struct omapfb_device *fbdev, int ext_mode,
1379 int skip_init = 0; 1385 int skip_init = 0;
1380 int i; 1386 int i;
1381 1387
1388 r = platform_device_register(&omapdss_device);
1389 if (r) {
1390 dev_err(fbdev->dev, "can't register omapdss device\n");
1391 return r;
1392 }
1393
1382 memset(&dispc, 0, sizeof(dispc)); 1394 memset(&dispc, 0, sizeof(dispc));
1383 1395
1384 dispc.base = ioremap(DISPC_BASE, SZ_1K); 1396 dispc.base = ioremap(DISPC_BASE, SZ_1K);
@@ -1522,6 +1534,7 @@ static void omap_dispc_cleanup(void)
1522 free_irq(INT_24XX_DSS_IRQ, dispc.fbdev); 1534 free_irq(INT_24XX_DSS_IRQ, dispc.fbdev);
1523 put_dss_clocks(); 1535 put_dss_clocks();
1524 iounmap(dispc.base); 1536 iounmap(dispc.base);
1537 platform_device_unregister(&omapdss_device);
1525} 1538}
1526 1539
1527const struct lcd_ctrl omap2_int_ctrl = { 1540const struct lcd_ctrl omap2_int_ctrl = {
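
The id-less "omapdss" platform device registered in this file exists only so the three clk_get() calls can name a device that the OMAP clock framework associates with the DSS clocks, rather than the fbdev's own device. The trick in isolation:

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>

static struct platform_device omapdss_device = {
	.name	= "omapdss",
	.id	= -1,			/* single, unnumbered instance */
};

static struct clk *example_get_dss_ick(void)
{
	if (platform_device_register(&omapdss_device))
		return ERR_PTR(-ENODEV);
	/* the clock framework matches dev name "omapdss" + con_id "ick" */
	return clk_get(&omapdss_device.dev, "ick");
}
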
diff --git a/drivers/video/omap/hwa742.c b/drivers/video/omap/hwa742.c
index 17a975e4c9c9..0016f77cd13f 100644
--- a/drivers/video/omap/hwa742.c
+++ b/drivers/video/omap/hwa742.c
@@ -25,10 +25,11 @@
25#include <linux/fb.h> 25#include <linux/fb.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/interrupt.h>
28 29
29#include <plat/dma.h> 30#include <plat/dma.h>
30#include <plat/omapfb.h>
31#include <plat/hwa742.h> 31#include <plat/hwa742.h>
32#include "omapfb.h"
32 33
33#define HWA742_REV_CODE_REG 0x0 34#define HWA742_REV_CODE_REG 0x0
34#define HWA742_CONFIG_REG 0x2 35#define HWA742_CONFIG_REG 0x2
diff --git a/drivers/video/omap/lcd_2430sdp.c b/drivers/video/omap/lcd_2430sdp.c
index fea7feee0b77..760645d9dbb6 100644
--- a/drivers/video/omap/lcd_2430sdp.c
+++ b/drivers/video/omap/lcd_2430sdp.c
@@ -28,9 +28,10 @@
28#include <linux/i2c/twl4030.h> 28#include <linux/i2c/twl4030.h>
29 29
30#include <plat/mux.h> 30#include <plat/mux.h>
31#include <plat/omapfb.h>
32#include <asm/mach-types.h> 31#include <asm/mach-types.h>
33 32
33#include "omapfb.h"
34
34#define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91 35#define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91
35#define SDP2430_LCD_PANEL_ENABLE_GPIO 154 36#define SDP2430_LCD_PANEL_ENABLE_GPIO 154
36#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24 37#define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 24
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index b3973ebd1b0f..567db6ac32c8 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -27,7 +27,8 @@
27 27
28#include <plat/board-ams-delta.h> 28#include <plat/board-ams-delta.h>
29#include <mach/hardware.h> 29#include <mach/hardware.h>
30#include <plat/omapfb.h> 30
31#include "omapfb.h"
31 32
32#define AMS_DELTA_DEFAULT_CONTRAST 112 33#define AMS_DELTA_DEFAULT_CONTRAST 112
33 34
diff --git a/drivers/video/omap/lcd_apollon.c b/drivers/video/omap/lcd_apollon.c
index 4c5cefc5153b..2be94eb3bbf5 100644
--- a/drivers/video/omap/lcd_apollon.c
+++ b/drivers/video/omap/lcd_apollon.c
@@ -26,7 +26,8 @@
26 26
27#include <mach/gpio.h> 27#include <mach/gpio.h>
28#include <plat/mux.h> 28#include <plat/mux.h>
29#include <plat/omapfb.h> 29
30#include "omapfb.h"
30 31
31/* #define USE_35INCH_LCD 1 */ 32/* #define USE_35INCH_LCD 1 */
32 33
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 240b4fb10741..8df688748b5a 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -24,7 +24,7 @@
24#include <linux/i2c/tps65010.h> 24#include <linux/i2c/tps65010.h>
25 25
26#include <mach/gpio.h> 26#include <mach/gpio.h>
27#include <plat/omapfb.h> 27#include "omapfb.h"
28 28
29#define MODULE_NAME "omapfb-lcd_h3" 29#define MODULE_NAME "omapfb-lcd_h3"
30 30
diff --git a/drivers/video/omap/lcd_h4.c b/drivers/video/omap/lcd_h4.c
index 720625da1f4e..03a06a982750 100644
--- a/drivers/video/omap/lcd_h4.c
+++ b/drivers/video/omap/lcd_h4.c
@@ -22,7 +22,7 @@
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#include <plat/omapfb.h> 25#include "omapfb.h"
26 26
27static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) 27static int h4_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
28{ 28{
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index 2e0c81ea7483..a9007c5d1fad 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -29,7 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31 31
32#include <plat/omapfb.h> 32#include "omapfb.h"
33 33
34static int htcherald_panel_init(struct lcd_panel *panel, 34static int htcherald_panel_init(struct lcd_panel *panel,
35 struct omapfb_device *fbdev) 35 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index aafe9b497e2d..3271f1643b26 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -24,7 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <plat/fpga.h> 26#include <plat/fpga.h>
27#include <plat/omapfb.h> 27#include "omapfb.h"
28 28
29static int innovator1510_panel_init(struct lcd_panel *panel, 29static int innovator1510_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev) 30 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 0de338264a8a..9fff86f67bde 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -23,7 +23,7 @@
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#include <mach/gpio.h> 25#include <mach/gpio.h>
26#include <plat/omapfb.h> 26#include "omapfb.h"
27 27
28#define MODULE_NAME "omapfb-lcd_h3" 28#define MODULE_NAME "omapfb-lcd_h3"
29 29
diff --git a/drivers/video/omap/lcd_ldp.c b/drivers/video/omap/lcd_ldp.c
index 6a260dfdadc5..5bb7f6f14601 100644
--- a/drivers/video/omap/lcd_ldp.c
+++ b/drivers/video/omap/lcd_ldp.c
@@ -28,9 +28,10 @@
28 28
29#include <mach/gpio.h> 29#include <mach/gpio.h>
30#include <plat/mux.h> 30#include <plat/mux.h>
31#include <plat/omapfb.h>
32#include <asm/mach-types.h> 31#include <asm/mach-types.h>
33 32
33#include "omapfb.h"
34
34#define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES) 35#define LCD_PANEL_BACKLIGHT_GPIO (15 + OMAP_MAX_GPIO_LINES)
35#define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES) 36#define LCD_PANEL_ENABLE_GPIO (7 + OMAP_MAX_GPIO_LINES)
36 37
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index 8f3e2b4bb4f3..abe1c76a3257 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -23,9 +23,10 @@
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
25 25
26#include <plat/omapfb.h>
27#include <plat/lcd_mipid.h> 26#include <plat/lcd_mipid.h>
28 27
28#include "omapfb.h"
29
29#define MIPID_MODULE_NAME "lcd_mipid" 30#define MIPID_MODULE_NAME "lcd_mipid"
30 31
31#define MIPID_CMD_READ_DISP_ID 0x04 32#define MIPID_CMD_READ_DISP_ID 0x04
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c
index e1a38abca3e7..006c2fe7360e 100644
--- a/drivers/video/omap/lcd_omap2evm.c
+++ b/drivers/video/omap/lcd_omap2evm.c
@@ -27,9 +27,10 @@
27#include <linux/i2c/twl4030.h> 27#include <linux/i2c/twl4030.h>
28 28
29#include <plat/mux.h> 29#include <plat/mux.h>
30#include <plat/omapfb.h>
31#include <asm/mach-types.h> 30#include <asm/mach-types.h>
32 31
32#include "omapfb.h"
33
33#define LCD_PANEL_ENABLE_GPIO 154 34#define LCD_PANEL_ENABLE_GPIO 154
34#define LCD_PANEL_LR 128 35#define LCD_PANEL_LR 128
35#define LCD_PANEL_UD 129 36#define LCD_PANEL_UD 129
diff --git a/drivers/video/omap/lcd_omap3beagle.c b/drivers/video/omap/lcd_omap3beagle.c
index ccec084ed647..fc503d8f3c24 100644
--- a/drivers/video/omap/lcd_omap3beagle.c
+++ b/drivers/video/omap/lcd_omap3beagle.c
@@ -26,9 +26,11 @@
26#include <linux/i2c/twl4030.h> 26#include <linux/i2c/twl4030.h>
27 27
28#include <plat/mux.h> 28#include <plat/mux.h>
29#include <plat/omapfb.h> 29#include <plat/mux.h>
30#include <asm/mach-types.h> 30#include <asm/mach-types.h>
31 31
32#include "omapfb.h"
33
32#define LCD_PANEL_ENABLE_GPIO 170 34#define LCD_PANEL_ENABLE_GPIO 170
33 35
34static int omap3beagle_panel_init(struct lcd_panel *panel, 36static int omap3beagle_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_omap3evm.c b/drivers/video/omap/lcd_omap3evm.c
index 556eb31db24c..ae2edc4081a8 100644
--- a/drivers/video/omap/lcd_omap3evm.c
+++ b/drivers/video/omap/lcd_omap3evm.c
@@ -26,9 +26,10 @@
26#include <linux/i2c/twl4030.h> 26#include <linux/i2c/twl4030.h>
27 27
28#include <plat/mux.h> 28#include <plat/mux.h>
29#include <plat/omapfb.h>
30#include <asm/mach-types.h> 29#include <asm/mach-types.h>
31 30
31#include "omapfb.h"
32
32#define LCD_PANEL_ENABLE_GPIO 153 33#define LCD_PANEL_ENABLE_GPIO 153
33#define LCD_PANEL_LR 2 34#define LCD_PANEL_LR 2
34#define LCD_PANEL_UD 3 35#define LCD_PANEL_UD 3
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index bb21d7dca39e..b87e8b83f29c 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -25,7 +25,7 @@
25 25
26#include <mach/gpio.h> 26#include <mach/gpio.h>
27#include <plat/mux.h> 27#include <plat/mux.h>
28#include <plat/omapfb.h> 28#include "omapfb.h"
29 29
30static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev) 30static int osk_panel_init(struct lcd_panel *panel, struct omapfb_device *fbdev)
31{ 31{
diff --git a/drivers/video/omap/lcd_overo.c b/drivers/video/omap/lcd_overo.c
index b0f86e514cde..56ee192e9ee2 100644
--- a/drivers/video/omap/lcd_overo.c
+++ b/drivers/video/omap/lcd_overo.c
@@ -25,9 +25,10 @@
25 25
26#include <mach/gpio.h> 26#include <mach/gpio.h>
27#include <plat/mux.h> 27#include <plat/mux.h>
28#include <plat/omapfb.h>
29#include <asm/mach-types.h> 28#include <asm/mach-types.h>
30 29
30#include "omapfb.h"
31
31#define LCD_ENABLE 144 32#define LCD_ENABLE 144
32 33
33static int overo_panel_init(struct lcd_panel *panel, 34static int overo_panel_init(struct lcd_panel *panel,
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index d30289603ce8..4cb301750d02 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -24,7 +24,7 @@
24#include <linux/io.h> 24#include <linux/io.h>
25 25
26#include <plat/fpga.h> 26#include <plat/fpga.h>
27#include <plat/omapfb.h> 27#include "omapfb.h"
28 28
29static int palmte_panel_init(struct lcd_panel *panel, 29static int palmte_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev) 30 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index 557424fb6df1..ff0e6d7ab3a2 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -30,7 +30,7 @@ GPIO13 - screen blanking
30#include <linux/io.h> 30#include <linux/io.h>
31 31
32#include <mach/gpio.h> 32#include <mach/gpio.h>
33#include <plat/omapfb.h> 33#include "omapfb.h"
34 34
35static int palmtt_panel_init(struct lcd_panel *panel, 35static int palmtt_panel_init(struct lcd_panel *panel,
36 struct omapfb_device *fbdev) 36 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index 5f4b5b2c1f41..2334e56536bc 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -24,7 +24,7 @@
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/io.h> 25#include <linux/io.h>
26 26
27#include <plat/omapfb.h> 27#include "omapfb.h"
28 28
29static int palmz71_panel_init(struct lcd_panel *panel, 29static int palmz71_panel_init(struct lcd_panel *panel,
30 struct omapfb_device *fbdev) 30 struct omapfb_device *fbdev)
diff --git a/drivers/video/omap/lcdc.c b/drivers/video/omap/lcdc.c
index 5f32cafbf74c..b831e1df629e 100644
--- a/drivers/video/omap/lcdc.c
+++ b/drivers/video/omap/lcdc.c
@@ -30,10 +30,11 @@
30#include <linux/clk.h> 30#include <linux/clk.h>
31 31
32#include <plat/dma.h> 32#include <plat/dma.h>
33#include <plat/omapfb.h>
34 33
35#include <asm/mach-types.h> 34#include <asm/mach-types.h>
36 35
36#include "omapfb.h"
37
37#include "lcdc.h" 38#include "lcdc.h"
38 39
39#define MODULE_NAME "lcdc" 40#define MODULE_NAME "lcdc"
diff --git a/arch/arm/plat-omap/include/plat/omapfb.h b/drivers/video/omap/omapfb.h
index bfef7ab95f17..46e4714014e8 100644
--- a/arch/arm/plat-omap/include/plat/omapfb.h
+++ b/drivers/video/omap/omapfb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * File: arch/arm/plat-omap/include/mach/omapfb.h 2 * File: drivers/video/omap/omapfb.h
3 * 3 *
4 * Framebuffer driver for TI OMAP boards 4 * Framebuffer driver for TI OMAP boards
5 * 5 *
@@ -24,151 +24,12 @@
24#ifndef __OMAPFB_H 24#ifndef __OMAPFB_H
25#define __OMAPFB_H 25#define __OMAPFB_H
26 26
27#include <asm/ioctl.h>
28#include <asm/types.h>
29
30/* IOCTL commands. */
31
32#define OMAP_IOW(num, dtype) _IOW('O', num, dtype)
33#define OMAP_IOR(num, dtype) _IOR('O', num, dtype)
34#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype)
35#define OMAP_IO(num) _IO('O', num)
36
37#define OMAPFB_MIRROR OMAP_IOW(31, int)
38#define OMAPFB_SYNC_GFX OMAP_IO(37)
39#define OMAPFB_VSYNC OMAP_IO(38)
40#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int)
41#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps)
42#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int)
43#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
44#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
45#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
46#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key)
47#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key)
48#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info)
49#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info)
50#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window)
51#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info)
52#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info)
53
54#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
55#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
56#define OMAPFB_CAPS_PANEL_MASK 0xff000000
57
58#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000
59#define OMAPFB_CAPS_TEARSYNC 0x00002000
60#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000
61#define OMAPFB_CAPS_PLANE_SCALE 0x00008000
62#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000
63#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000
64#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000
65#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000
66#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000
67
68/* Values from DSP must map to lower 16-bits */
69#define OMAPFB_FORMAT_MASK 0x00ff
70#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100
71#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200
72#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400
73#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800
74#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000
75
76#define OMAPFB_EVENT_READY 1
77#define OMAPFB_EVENT_DISABLED 2
78
79#define OMAPFB_MEMTYPE_SDRAM 0
80#define OMAPFB_MEMTYPE_SRAM 1
81#define OMAPFB_MEMTYPE_MAX 1
82
83enum omapfb_color_format {
84 OMAPFB_COLOR_RGB565 = 0,
85 OMAPFB_COLOR_YUV422,
86 OMAPFB_COLOR_YUV420,
87 OMAPFB_COLOR_CLUT_8BPP,
88 OMAPFB_COLOR_CLUT_4BPP,
89 OMAPFB_COLOR_CLUT_2BPP,
90 OMAPFB_COLOR_CLUT_1BPP,
91 OMAPFB_COLOR_RGB444,
92 OMAPFB_COLOR_YUY422,
93};
94
95struct omapfb_update_window {
96 __u32 x, y;
97 __u32 width, height;
98 __u32 format;
99 __u32 out_x, out_y;
100 __u32 out_width, out_height;
101 __u32 reserved[8];
102};
103
104struct omapfb_update_window_old {
105 __u32 x, y;
106 __u32 width, height;
107 __u32 format;
108};
109
110enum omapfb_plane {
111 OMAPFB_PLANE_GFX = 0,
112 OMAPFB_PLANE_VID1,
113 OMAPFB_PLANE_VID2,
114};
115
116enum omapfb_channel_out {
117 OMAPFB_CHANNEL_OUT_LCD = 0,
118 OMAPFB_CHANNEL_OUT_DIGIT,
119};
120
121struct omapfb_plane_info {
122 __u32 pos_x;
123 __u32 pos_y;
124 __u8 enabled;
125 __u8 channel_out;
126 __u8 mirror;
127 __u8 reserved1;
128 __u32 out_width;
129 __u32 out_height;
130 __u32 reserved2[12];
131};
132
133struct omapfb_mem_info {
134 __u32 size;
135 __u8 type;
136 __u8 reserved[3];
137};
138
139struct omapfb_caps {
140 __u32 ctrl;
141 __u32 plane_color;
142 __u32 wnd_color;
143};
144
145enum omapfb_color_key_type {
146 OMAPFB_COLOR_KEY_DISABLED = 0,
147 OMAPFB_COLOR_KEY_GFX_DST,
148 OMAPFB_COLOR_KEY_VID_SRC,
149};
150
151struct omapfb_color_key {
152 __u8 channel_out;
153 __u32 background;
154 __u32 trans_key;
155 __u8 key_type;
156};
157
158enum omapfb_update_mode {
159 OMAPFB_UPDATE_DISABLED = 0,
160 OMAPFB_AUTO_UPDATE,
161 OMAPFB_MANUAL_UPDATE
162};
163
164#ifdef __KERNEL__
165
166#include <linux/completion.h>
167#include <linux/interrupt.h>
168#include <linux/fb.h> 27#include <linux/fb.h>
169#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/omapfb.h>
170 30
171#include <plat/board.h> 31#define OMAPFB_EVENT_READY 1
32#define OMAPFB_EVENT_DISABLED 2
172 33
173#define OMAP_LCDC_INV_VSYNC 0x0001 34#define OMAP_LCDC_INV_VSYNC 0x0001
174#define OMAP_LCDC_INV_HSYNC 0x0002 35#define OMAP_LCDC_INV_HSYNC 0x0002
@@ -184,12 +45,6 @@ enum omapfb_update_mode {
184#define OMAPFB_PLANE_XRES_MIN 8 45#define OMAPFB_PLANE_XRES_MIN 8
185#define OMAPFB_PLANE_YRES_MIN 8 46#define OMAPFB_PLANE_YRES_MIN 8
186 47
187#ifdef CONFIG_ARCH_OMAP1
188#define OMAPFB_PLANE_NUM 1
189#else
190#define OMAPFB_PLANE_NUM 3
191#endif
192
193struct omapfb_device; 48struct omapfb_device;
194 49
195struct lcd_panel { 50struct lcd_panel {
@@ -256,7 +111,7 @@ struct lcd_ctrl_extif {
256 void (*read_data) (void *buf, unsigned int len); 111 void (*read_data) (void *buf, unsigned int len);
257 void (*write_data) (const void *buf, unsigned int len); 112 void (*write_data) (const void *buf, unsigned int len);
258 void (*transfer_area) (int width, int height, 113 void (*transfer_area) (int width, int height,
259 void (callback)(void * data), void *data); 114 void (callback)(void *data), void *data);
260 int (*setup_tearsync) (unsigned pin_cnt, 115 int (*setup_tearsync) (unsigned pin_cnt,
261 unsigned hs_pulse_time, unsigned vs_pulse_time, 116 unsigned hs_pulse_time, unsigned vs_pulse_time,
262 int hs_pol_inv, int vs_pol_inv, int div); 117 int hs_pol_inv, int vs_pol_inv, int div);
@@ -275,20 +130,6 @@ typedef int (*omapfb_notifier_callback_t)(struct notifier_block *,
275 unsigned long event, 130 unsigned long event,
276 void *fbi); 131 void *fbi);
277 132
278struct omapfb_mem_region {
279 u32 paddr;
280 void __iomem *vaddr;
281 unsigned long size;
282 u8 type; /* OMAPFB_PLANE_MEM_* */
283 unsigned alloc:1; /* allocated by the driver */
284 unsigned map:1; /* kernel mapped by the driver */
285};
286
287struct omapfb_mem_desc {
288 int region_cnt;
289 struct omapfb_mem_region region[OMAPFB_PLANE_NUM];
290};
291
292struct lcd_ctrl { 133struct lcd_ctrl {
293 const char *name; 134 const char *name;
294 void *data; 135 void *data;
@@ -331,9 +172,9 @@ struct lcd_ctrl {
331}; 172};
332 173
333enum omapfb_state { 174enum omapfb_state {
334 OMAPFB_DISABLED = 0, 175 OMAPFB_DISABLED = 0,
335 OMAPFB_SUSPENDED= 99, 176 OMAPFB_SUSPENDED = 99,
336 OMAPFB_ACTIVE = 100 177 OMAPFB_ACTIVE = 100
337}; 178};
338 179
339struct omapfb_plane_struct { 180struct omapfb_plane_struct {
@@ -345,8 +186,8 @@ struct omapfb_plane_struct {
345 186
346struct omapfb_device { 187struct omapfb_device {
347 int state; 188 int state;
348 int ext_lcdc; /* Using external 189 int ext_lcdc; /* Using external
349 LCD controller */ 190 LCD controller */
350 struct mutex rqueue_mutex; 191 struct mutex rqueue_mutex;
351 192
352 int palette_size; 193 int palette_size;
@@ -364,19 +205,12 @@ struct omapfb_device {
364 struct fb_info *fb_info[OMAPFB_PLANE_NUM]; 205 struct fb_info *fb_info[OMAPFB_PLANE_NUM];
365}; 206};
366 207
367struct omapfb_platform_data {
368 struct omap_lcd_config lcd;
369 struct omapfb_mem_desc mem_desc;
370 void *ctrl_platform_data;
371};
372
373#ifdef CONFIG_ARCH_OMAP1 208#ifdef CONFIG_ARCH_OMAP1
374extern struct lcd_ctrl omap1_lcd_ctrl; 209extern struct lcd_ctrl omap1_lcd_ctrl;
375#else 210#else
376extern struct lcd_ctrl omap2_disp_ctrl; 211extern struct lcd_ctrl omap2_disp_ctrl;
377#endif 212#endif
378 213
379extern void omapfb_reserve_sdram(void);
380extern void omapfb_register_panel(struct lcd_panel *panel); 214extern void omapfb_register_panel(struct lcd_panel *panel);
381extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval); 215extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
382extern void omapfb_notify_clients(struct omapfb_device *fbdev, 216extern void omapfb_notify_clients(struct omapfb_device *fbdev,
@@ -390,9 +224,4 @@ extern int omapfb_update_window_async(struct fb_info *fbi,
390 void (*callback)(void *), 224 void (*callback)(void *),
391 void *callback_data); 225 void *callback_data);
392 226
393/* in arch/arm/plat-omap/fb.c */
394extern void omapfb_set_ctrl_platform_data(void *pdata);
395
396#endif /* __KERNEL__ */
397
398#endif /* __OMAPFB_H */ 227#endif /* __OMAPFB_H */
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index f900a43db8d7..c7f59a5ccdbc 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -29,8 +29,8 @@
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30 30
31#include <plat/dma.h> 31#include <plat/dma.h>
32#include <plat/omapfb.h>
33 32
33#include "omapfb.h"
34#include "lcdc.h" 34#include "lcdc.h"
35#include "dispc.h" 35#include "dispc.h"
36 36
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index c90fa39486b4..fed7b1bda19c 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -27,8 +27,7 @@
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/io.h> 28#include <linux/io.h>
29 29
30#include <plat/omapfb.h> 30#include "omapfb.h"
31
32#include "dispc.h" 31#include "dispc.h"
33 32
34/* To work around an RFBI transfer rate limitation */ 33/* To work around an RFBI transfer rate limitation */
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 79dc84f09713..8fb7c708f563 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -23,10 +23,11 @@
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/interrupt.h>
26 27
27#include <plat/dma.h> 28#include <plat/dma.h>
28#include <plat/omapfb.h>
29 29
30#include "omapfb.h"
30#include "lcdc.h" 31#include "lcdc.h"
31 32
32#define MODULE_NAME "omapfb-sossi" 33#define MODULE_NAME "omapfb-sossi"
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
new file mode 100644
index 000000000000..d877c361abda
--- /dev/null
+++ b/drivers/video/omap2/Kconfig
@@ -0,0 +1,9 @@
1config OMAP2_VRAM
2 bool
3
4config OMAP2_VRFB
5 bool
6
7source "drivers/video/omap2/dss/Kconfig"
8source "drivers/video/omap2/omapfb/Kconfig"
9source "drivers/video/omap2/displays/Kconfig"
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
new file mode 100644
index 000000000000..d853d05dad31
--- /dev/null
+++ b/drivers/video/omap2/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_OMAP2_VRAM) += vram.o
2obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
3
4obj-y += dss/
5obj-y += omapfb/
6obj-y += displays/
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
new file mode 100644
index 000000000000..b12a59c9c50a
--- /dev/null
+++ b/drivers/video/omap2/displays/Kconfig
@@ -0,0 +1,22 @@
1menu "OMAP2/3 Display Device Drivers"
2 depends on OMAP2_DSS
3
4config PANEL_GENERIC
5 tristate "Generic Panel"
6 help
7 Generic panel driver.
8 Used for DVI output for Beagle and OMAP3 SDP.
9
10config PANEL_SHARP_LS037V7DW01
11 tristate "Sharp LS037V7DW01 LCD Panel"
12 depends on OMAP2_DSS
13 help
14 LCD Panel used in TI's SDP3430 and EVM boards
15
16config PANEL_TAAL
17 tristate "Taal DSI Panel"
18 depends on OMAP2_DSS_DSI
19 help
20 Taal DSI command mode panel from TPO.
21
22endmenu
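
As a point of reference, a minimal .config fragment pulling in the DSS core plus the generic DVI panel added here might look like the following (illustrative only; OMAP2_DSS and OMAP2_DSS_VENC come from the dss/Kconfig added elsewhere in this patch):

    CONFIG_OMAP2_DSS=y
    CONFIG_OMAP2_DSS_VENC=y
    CONFIG_PANEL_GENERIC=y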
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
new file mode 100644
index 000000000000..955646440b3a
--- /dev/null
+++ b/drivers/video/omap2/displays/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
2obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
3
4obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
new file mode 100644
index 000000000000..eb48d1afd800
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-generic.c
@@ -0,0 +1,104 @@
1/*
2 * Generic panel support
3 *
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/module.h>
21#include <linux/delay.h>
22
23#include <plat/display.h>
24
25static struct omap_video_timings generic_panel_timings = {
26 /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
27 .x_res = 640,
28 .y_res = 480,
29 .pixel_clock = 23500,
30 .hfp = 48,
31 .hsw = 32,
32 .hbp = 80,
33 .vfp = 3,
34 .vsw = 4,
35 .vbp = 7,
36};
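/*
 * Sanity check on the mode above (editor's arithmetic, not part of
 * the patch): htotal = 640 + 48 + 32 + 80 = 800 pixels and
 * vtotal = 480 + 3 + 4 + 7 = 494 lines, so 23500 kHz / (800 * 494)
 * ~= 59.5 Hz, i.e. the advertised 60 Hz reduced-blanking CVT mode.
 */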
37
38static int generic_panel_probe(struct omap_dss_device *dssdev)
39{
40 dssdev->panel.config = OMAP_DSS_LCD_TFT;
41 dssdev->panel.timings = generic_panel_timings;
42
43 return 0;
44}
45
46static void generic_panel_remove(struct omap_dss_device *dssdev)
47{
48}
49
50static int generic_panel_enable(struct omap_dss_device *dssdev)
51{
52 int r = 0;
53
54 if (dssdev->platform_enable)
55 r = dssdev->platform_enable(dssdev);
56
57 return r;
58}
59
60static void generic_panel_disable(struct omap_dss_device *dssdev)
61{
62 if (dssdev->platform_disable)
63 dssdev->platform_disable(dssdev);
64}
65
66static int generic_panel_suspend(struct omap_dss_device *dssdev)
67{
68 generic_panel_disable(dssdev);
69 return 0;
70}
71
72static int generic_panel_resume(struct omap_dss_device *dssdev)
73{
74 return generic_panel_enable(dssdev);
75}
76
77static struct omap_dss_driver generic_driver = {
78 .probe = generic_panel_probe,
79 .remove = generic_panel_remove,
80
81 .enable = generic_panel_enable,
82 .disable = generic_panel_disable,
83 .suspend = generic_panel_suspend,
84 .resume = generic_panel_resume,
85
86 .driver = {
87 .name = "generic_panel",
88 .owner = THIS_MODULE,
89 },
90};
91
92static int __init generic_panel_drv_init(void)
93{
94 return omap_dss_register_driver(&generic_driver);
95}
96
97static void __exit generic_panel_drv_exit(void)
98{
99 omap_dss_unregister_driver(&generic_driver);
100}
101
102module_init(generic_panel_drv_init);
103module_exit(generic_panel_drv_exit);
104MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
new file mode 100644
index 000000000000..bbe880bbe795
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -0,0 +1,153 @@
1/*
2 * LCD panel driver for Sharp LS037V7DW01
3 *
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <linux/module.h>
21#include <linux/delay.h>
22#include <linux/device.h>
23#include <linux/regulator/consumer.h>
24#include <linux/err.h>
25
26#include <plat/display.h>
27
28struct sharp_data {
29 /* XXX This regulator should actually be in SDP board file, not here,
30 * as it doesn't actually power the LCD, but something else that
31 * affects the output to LCD (I think. Somebody clarify). It doesn't do
32 * harm here, as SDP is the only board using this currently */
33 struct regulator *vdvi_reg;
34};
35
36static struct omap_video_timings sharp_ls_timings = {
37 .x_res = 480,
38 .y_res = 640,
39
40 .pixel_clock = 19200,
41
42 .hsw = 2,
43 .hfp = 1,
44 .hbp = 28,
45
46 .vsw = 1,
47 .vfp = 1,
48 .vbp = 1,
49};
50
51static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
52{
53 struct sharp_data *sd;
54
55 dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
56 OMAP_DSS_LCD_IHS;
57 dssdev->panel.acb = 0x28;
58 dssdev->panel.timings = sharp_ls_timings;
59
60 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
61 if (!sd)
62 return -ENOMEM;
63
64 dev_set_drvdata(&dssdev->dev, sd);
65
66 sd->vdvi_reg = regulator_get(&dssdev->dev, "vdvi");
67 if (IS_ERR(sd->vdvi_reg)) {
68 kfree(sd);
69 pr_err("failed to get VDVI regulator\n");
70 return PTR_ERR(sd->vdvi_reg);
71 }
72
73 return 0;
74}
75
76static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
77{
78 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
79
80 regulator_put(sd->vdvi_reg);
81
82 kfree(sd);
83}
84
85static int sharp_ls_panel_enable(struct omap_dss_device *dssdev)
86{
87 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
88 int r = 0;
89
 90	/* wait a couple of vsyncs until enabling the LCD */

91 msleep(50);
92
93 regulator_enable(sd->vdvi_reg);
94
95 if (dssdev->platform_enable)
96 r = dssdev->platform_enable(dssdev);
97
98 return r;
99}
100
101static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
102{
103 struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
104
105 if (dssdev->platform_disable)
106 dssdev->platform_disable(dssdev);
107
108 regulator_disable(sd->vdvi_reg);
109
110 /* wait at least 5 vsyncs after disabling the LCD */
111
112 msleep(100);
113}
114
115static int sharp_ls_panel_suspend(struct omap_dss_device *dssdev)
116{
117 sharp_ls_panel_disable(dssdev);
118 return 0;
119}
120
121static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
122{
123 return sharp_ls_panel_enable(dssdev);
124}
125
126static struct omap_dss_driver sharp_ls_driver = {
127 .probe = sharp_ls_panel_probe,
128 .remove = sharp_ls_panel_remove,
129
130 .enable = sharp_ls_panel_enable,
131 .disable = sharp_ls_panel_disable,
132 .suspend = sharp_ls_panel_suspend,
133 .resume = sharp_ls_panel_resume,
134
135 .driver = {
136 .name = "sharp_ls_panel",
137 .owner = THIS_MODULE,
138 },
139};
140
141static int __init sharp_ls_panel_drv_init(void)
142{
143 return omap_dss_register_driver(&sharp_ls_driver);
144}
145
146static void __exit sharp_ls_panel_drv_exit(void)
147{
148 omap_dss_unregister_driver(&sharp_ls_driver);
149}
150
151module_init(sharp_ls_panel_drv_init);
152module_exit(sharp_ls_panel_drv_exit);
153MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
new file mode 100644
index 000000000000..1f01dfc5e52e
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -0,0 +1,1003 @@
1/*
2 * Taal DSI command mode panel
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20/*#define DEBUG*/
21
22#include <linux/module.h>
23#include <linux/delay.h>
24#include <linux/err.h>
25#include <linux/jiffies.h>
26#include <linux/sched.h>
27#include <linux/backlight.h>
28#include <linux/fb.h>
29#include <linux/interrupt.h>
30#include <linux/gpio.h>
31#include <linux/completion.h>
32#include <linux/workqueue.h>
33
34#include <plat/display.h>
35
36/* DSI Virtual channel. Hardcoded for now. */
37#define TCH 0
38
39#define DCS_READ_NUM_ERRORS 0x05
40#define DCS_READ_POWER_MODE 0x0a
41#define DCS_READ_MADCTL 0x0b
42#define DCS_READ_PIXEL_FORMAT 0x0c
43#define DCS_RDDSDR 0x0f
44#define DCS_SLEEP_IN 0x10
45#define DCS_SLEEP_OUT 0x11
46#define DCS_DISPLAY_OFF 0x28
47#define DCS_DISPLAY_ON 0x29
48#define DCS_COLUMN_ADDR 0x2a
49#define DCS_PAGE_ADDR 0x2b
50#define DCS_MEMORY_WRITE 0x2c
51#define DCS_TEAR_OFF 0x34
52#define DCS_TEAR_ON 0x35
53#define DCS_MEM_ACC_CTRL 0x36
54#define DCS_PIXEL_FORMAT 0x3a
55#define DCS_BRIGHTNESS 0x51
56#define DCS_CTRL_DISPLAY 0x53
57#define DCS_WRITE_CABC 0x55
58#define DCS_READ_CABC 0x56
59#define DCS_GET_ID1 0xda
60#define DCS_GET_ID2 0xdb
61#define DCS_GET_ID3 0xdc
62
63/* #define TAAL_USE_ESD_CHECK */
64#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
65
66struct taal_data {
67 struct backlight_device *bldev;
68
69 unsigned long hw_guard_end; /* next value of jiffies when we can
70 * issue the next sleep in/out command
71 */
72 unsigned long hw_guard_wait; /* max guard time in jiffies */
73
74 struct omap_dss_device *dssdev;
75
76 bool enabled;
77 u8 rotate;
78 bool mirror;
79
80 bool te_enabled;
81 bool use_ext_te;
82 struct completion te_completion;
83
84 bool use_dsi_bl;
85
86 bool cabc_broken;
87 unsigned cabc_mode;
88
89 bool intro_printed;
90
91 struct workqueue_struct *esd_wq;
92 struct delayed_work esd_work;
93};
94
95static void taal_esd_work(struct work_struct *work);
96
97static void hw_guard_start(struct taal_data *td, int guard_msec)
98{
99 td->hw_guard_wait = msecs_to_jiffies(guard_msec);
100 td->hw_guard_end = jiffies + td->hw_guard_wait;
101}
102
103static void hw_guard_wait(struct taal_data *td)
104{
105 unsigned long wait = td->hw_guard_end - jiffies;
106
107 if ((long)wait > 0 && wait <= td->hw_guard_wait) {
108 set_current_state(TASK_UNINTERRUPTIBLE);
109 schedule_timeout(wait);
110 }
111}
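/*
 * Illustrative timeline for the guard logic above (editor's sketch):
 *
 *   t = 0 ms     taal_sleep_out() completes and calls
 *                hw_guard_start(td, 120), so hw_guard_end is set
 *                120 ms into the future
 *   t = 30 ms    a subsequent sleep in/out calls hw_guard_wait(),
 *                computes wait ~= 90 ms and sleeps uninterruptibly
 *   t = 120 ms   guard expires, the next DCS command may be sent
 *
 * This honours the panel's minimum spacing between sleep commands.
 */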
112
113static int taal_dcs_read_1(u8 dcs_cmd, u8 *data)
114{
115 int r;
116 u8 buf[1];
117
118 r = dsi_vc_dcs_read(TCH, dcs_cmd, buf, 1);
119
120 if (r < 0)
121 return r;
122
123 *data = buf[0];
124
125 return 0;
126}
127
128static int taal_dcs_write_0(u8 dcs_cmd)
129{
130 return dsi_vc_dcs_write(TCH, &dcs_cmd, 1);
131}
132
133static int taal_dcs_write_1(u8 dcs_cmd, u8 param)
134{
135 u8 buf[2];
136 buf[0] = dcs_cmd;
137 buf[1] = param;
138 return dsi_vc_dcs_write(TCH, buf, 2);
139}
140
141static int taal_sleep_in(struct taal_data *td)
142
143{
144 u8 cmd;
145 int r;
146
147 hw_guard_wait(td);
148
149 cmd = DCS_SLEEP_IN;
150 r = dsi_vc_dcs_write_nosync(TCH, &cmd, 1);
151 if (r)
152 return r;
153
154 hw_guard_start(td, 120);
155
156 msleep(5);
157
158 return 0;
159}
160
161static int taal_sleep_out(struct taal_data *td)
162{
163 int r;
164
165 hw_guard_wait(td);
166
167 r = taal_dcs_write_0(DCS_SLEEP_OUT);
168 if (r)
169 return r;
170
171 hw_guard_start(td, 120);
172
173 msleep(5);
174
175 return 0;
176}
177
178static int taal_get_id(u8 *id1, u8 *id2, u8 *id3)
179{
180 int r;
181
182 r = taal_dcs_read_1(DCS_GET_ID1, id1);
183 if (r)
184 return r;
185 r = taal_dcs_read_1(DCS_GET_ID2, id2);
186 if (r)
187 return r;
188 r = taal_dcs_read_1(DCS_GET_ID3, id3);
189 if (r)
190 return r;
191
192 return 0;
193}
194
195static int taal_set_addr_mode(u8 rotate, bool mirror)
196{
197 int r;
198 u8 mode;
199 int b5, b6, b7;
200
201 r = taal_dcs_read_1(DCS_READ_MADCTL, &mode);
202 if (r)
203 return r;
204
205 switch (rotate) {
206 default:
207 case 0:
208 b7 = 0;
209 b6 = 0;
210 b5 = 0;
211 break;
212 case 1:
213 b7 = 0;
214 b6 = 1;
215 b5 = 1;
216 break;
217 case 2:
218 b7 = 1;
219 b6 = 1;
220 b5 = 0;
221 break;
222 case 3:
223 b7 = 1;
224 b6 = 0;
225 b5 = 1;
226 break;
227 }
228
229 if (mirror)
230 b6 = !b6;
231
232 mode &= ~((1<<7) | (1<<6) | (1<<5));
233 mode |= (b7 << 7) | (b6 << 6) | (b5 << 5);
234
235 return taal_dcs_write_1(DCS_MEM_ACC_CTRL, mode);
236}
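/*
 * Example of the MADCTL packing above (editor's illustration):
 * rotate = 1 gives b7/b6/b5 = 0/1/1; if mirror is also set, b6
 * flips, so bits [7:5] of the written mode byte become 001 (0x20).
 */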
237
238static int taal_set_update_window(u16 x, u16 y, u16 w, u16 h)
239{
240 int r;
241 u16 x1 = x;
242 u16 x2 = x + w - 1;
243 u16 y1 = y;
244 u16 y2 = y + h - 1;
245
246 u8 buf[5];
247 buf[0] = DCS_COLUMN_ADDR;
248 buf[1] = (x1 >> 8) & 0xff;
249 buf[2] = (x1 >> 0) & 0xff;
250 buf[3] = (x2 >> 8) & 0xff;
251 buf[4] = (x2 >> 0) & 0xff;
252
253 r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
254 if (r)
255 return r;
256
257 buf[0] = DCS_PAGE_ADDR;
258 buf[1] = (y1 >> 8) & 0xff;
259 buf[2] = (y1 >> 0) & 0xff;
260 buf[3] = (y2 >> 8) & 0xff;
261 buf[4] = (y2 >> 0) & 0xff;
262
263 r = dsi_vc_dcs_write_nosync(TCH, buf, sizeof(buf));
264 if (r)
265 return r;
266
267 dsi_vc_send_bta_sync(TCH);
268
269 return r;
270}
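/*
 * Worked example (editor's illustration): updating the full 864x480
 * panel gives x2 = 863 = 0x035f and y2 = 479 = 0x01df, so the two
 * DCS packets sent above are
 *   { DCS_COLUMN_ADDR, 0x00, 0x00, 0x03, 0x5f }
 *   { DCS_PAGE_ADDR,   0x00, 0x00, 0x01, 0xdf }
 */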
271
272static int taal_bl_update_status(struct backlight_device *dev)
273{
274 struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
275 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
276 int r;
277 int level;
278
279 if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
280 dev->props.power == FB_BLANK_UNBLANK)
281 level = dev->props.brightness;
282 else
283 level = 0;
284
285 dev_dbg(&dssdev->dev, "update brightness to %d\n", level);
286
287 if (td->use_dsi_bl) {
288 if (td->enabled) {
289 dsi_bus_lock();
290 r = taal_dcs_write_1(DCS_BRIGHTNESS, level);
291 dsi_bus_unlock();
292 if (r)
293 return r;
294 }
295 } else {
296 if (!dssdev->set_backlight)
297 return -EINVAL;
298
299 r = dssdev->set_backlight(dssdev, level);
300 if (r)
301 return r;
302 }
303
304 return 0;
305}
306
307static int taal_bl_get_intensity(struct backlight_device *dev)
308{
309 if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
310 dev->props.power == FB_BLANK_UNBLANK)
311 return dev->props.brightness;
312
313 return 0;
314}
315
316static struct backlight_ops taal_bl_ops = {
317 .get_brightness = taal_bl_get_intensity,
318 .update_status = taal_bl_update_status,
319};
320
321static void taal_get_timings(struct omap_dss_device *dssdev,
322 struct omap_video_timings *timings)
323{
324 *timings = dssdev->panel.timings;
325}
326
327static void taal_get_resolution(struct omap_dss_device *dssdev,
328 u16 *xres, u16 *yres)
329{
330 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
331
332 if (td->rotate == 0 || td->rotate == 2) {
333 *xres = dssdev->panel.timings.x_res;
334 *yres = dssdev->panel.timings.y_res;
335 } else {
336 *yres = dssdev->panel.timings.x_res;
337 *xres = dssdev->panel.timings.y_res;
338 }
339}
340
341static irqreturn_t taal_te_isr(int irq, void *data)
342{
343 struct omap_dss_device *dssdev = data;
344 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
345
346 complete_all(&td->te_completion);
347
348 return IRQ_HANDLED;
349}
350
351static ssize_t taal_num_errors_show(struct device *dev,
352 struct device_attribute *attr, char *buf)
353{
354 struct omap_dss_device *dssdev = to_dss_device(dev);
355 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
356 u8 errors;
357 int r;
358
359 if (td->enabled) {
360 dsi_bus_lock();
361 r = taal_dcs_read_1(DCS_READ_NUM_ERRORS, &errors);
362 dsi_bus_unlock();
363 } else {
364 r = -ENODEV;
365 }
366
367 if (r)
368 return r;
369
370 return snprintf(buf, PAGE_SIZE, "%d\n", errors);
371}
372
373static ssize_t taal_hw_revision_show(struct device *dev,
374 struct device_attribute *attr, char *buf)
375{
376 struct omap_dss_device *dssdev = to_dss_device(dev);
377 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
378 u8 id1, id2, id3;
379 int r;
380
381 if (td->enabled) {
382 dsi_bus_lock();
383 r = taal_get_id(&id1, &id2, &id3);
384 dsi_bus_unlock();
385 } else {
386 r = -ENODEV;
387 }
388
389 if (r)
390 return r;
391
392 return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
393}
394
395static const char *cabc_modes[] = {
396 "off", /* used also always when CABC is not supported */
397 "ui",
398 "still-image",
399 "moving-image",
400};
401
402static ssize_t show_cabc_mode(struct device *dev,
403 struct device_attribute *attr,
404 char *buf)
405{
406 struct omap_dss_device *dssdev = to_dss_device(dev);
407 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
408 const char *mode_str;
409 int mode;
410 int len;
411
412 mode = td->cabc_mode;
413
414 mode_str = "unknown";
415 if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
416 mode_str = cabc_modes[mode];
417 len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
418
419 return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
420}
421
422static ssize_t store_cabc_mode(struct device *dev,
423 struct device_attribute *attr,
424 const char *buf, size_t count)
425{
426 struct omap_dss_device *dssdev = to_dss_device(dev);
427 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
428 int i;
429
430 for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
431 if (sysfs_streq(cabc_modes[i], buf))
432 break;
433 }
434
435 if (i == ARRAY_SIZE(cabc_modes))
436 return -EINVAL;
437
438 if (td->enabled) {
439 dsi_bus_lock();
440 if (!td->cabc_broken)
441 taal_dcs_write_1(DCS_WRITE_CABC, i);
442 dsi_bus_unlock();
443 }
444
445 td->cabc_mode = i;
446
447 return count;
448}
449
450static ssize_t show_cabc_available_modes(struct device *dev,
451 struct device_attribute *attr,
452 char *buf)
453{
454 int len;
455 int i;
456
457 for (i = 0, len = 0;
458 len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
459 len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
460 i ? " " : "", cabc_modes[i],
461 i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
462
463 return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
464}
465
466static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL);
467static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL);
468static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
469 show_cabc_mode, store_cabc_mode);
470static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
471 show_cabc_available_modes, NULL);
472
473static struct attribute *taal_attrs[] = {
474 &dev_attr_num_dsi_errors.attr,
475 &dev_attr_hw_revision.attr,
476 &dev_attr_cabc_mode.attr,
477 &dev_attr_cabc_available_modes.attr,
478 NULL,
479};
480
481static struct attribute_group taal_attr_group = {
482 .attrs = taal_attrs,
483};
484
485static int taal_probe(struct omap_dss_device *dssdev)
486{
487 struct taal_data *td;
488 struct backlight_device *bldev;
489 int r;
490
491 const struct omap_video_timings taal_panel_timings = {
492 .x_res = 864,
493 .y_res = 480,
494 };
495
496 dev_dbg(&dssdev->dev, "probe\n");
497
498 dssdev->panel.config = OMAP_DSS_LCD_TFT;
499 dssdev->panel.timings = taal_panel_timings;
500 dssdev->ctrl.pixel_size = 24;
501
502 td = kzalloc(sizeof(*td), GFP_KERNEL);
503 if (!td) {
504 r = -ENOMEM;
505 goto err0;
506 }
507 td->dssdev = dssdev;
508
509 td->esd_wq = create_singlethread_workqueue("taal_esd");
510 if (td->esd_wq == NULL) {
511 dev_err(&dssdev->dev, "can't create ESD workqueue\n");
512 r = -ENOMEM;
513 goto err2;
514 }
515 INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
516
517 dev_set_drvdata(&dssdev->dev, td);
518
519 dssdev->get_timings = taal_get_timings;
520 dssdev->get_resolution = taal_get_resolution;
521
522 /* if no platform set_backlight() defined, presume DSI backlight
523 * control */
524 if (!dssdev->set_backlight)
525 td->use_dsi_bl = true;
526
527 bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
528 &taal_bl_ops);
529 if (IS_ERR(bldev)) {
530 r = PTR_ERR(bldev);
531 goto err1;
532 }
533
534 td->bldev = bldev;
535
536 bldev->props.fb_blank = FB_BLANK_UNBLANK;
537 bldev->props.power = FB_BLANK_UNBLANK;
538 if (td->use_dsi_bl) {
539 bldev->props.max_brightness = 255;
540 bldev->props.brightness = 255;
541 } else {
542 bldev->props.max_brightness = 127;
543 bldev->props.brightness = 127;
544 }
545
546 taal_bl_update_status(bldev);
547
548 if (dssdev->phy.dsi.ext_te) {
549 int gpio = dssdev->phy.dsi.ext_te_gpio;
550
551 r = gpio_request(gpio, "taal irq");
552 if (r) {
553 dev_err(&dssdev->dev, "GPIO request failed\n");
554 goto err3;
555 }
556
557 gpio_direction_input(gpio);
558
559 r = request_irq(gpio_to_irq(gpio), taal_te_isr,
560 IRQF_DISABLED | IRQF_TRIGGER_RISING,
561 "taal vsync", dssdev);
562
563 if (r) {
564 dev_err(&dssdev->dev, "IRQ request failed\n");
565 gpio_free(gpio);
566 goto err3;
567 }
568
569 init_completion(&td->te_completion);
570
571 td->use_ext_te = true;
572 }
573
574 r = sysfs_create_group(&dssdev->dev.kobj, &taal_attr_group);
575 if (r) {
576 dev_err(&dssdev->dev, "failed to create sysfs files\n");
577 goto err4;
578 }
579
580 return 0;
581err4:
582 if (td->use_ext_te) {
583 int gpio = dssdev->phy.dsi.ext_te_gpio;
584 free_irq(gpio_to_irq(gpio), dssdev);
585 gpio_free(gpio);
586 }
587err3:
588 backlight_device_unregister(bldev);
589err2:
590 cancel_delayed_work_sync(&td->esd_work);
591 destroy_workqueue(td->esd_wq);
592err1:
593 kfree(td);
594err0:
595 return r;
596}
597
598static void taal_remove(struct omap_dss_device *dssdev)
599{
600 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
601 struct backlight_device *bldev;
602
603 dev_dbg(&dssdev->dev, "remove\n");
604
605 sysfs_remove_group(&dssdev->dev.kobj, &taal_attr_group);
606
607 if (td->use_ext_te) {
608 int gpio = dssdev->phy.dsi.ext_te_gpio;
609 free_irq(gpio_to_irq(gpio), dssdev);
610 gpio_free(gpio);
611 }
612
613 bldev = td->bldev;
614 bldev->props.power = FB_BLANK_POWERDOWN;
615 taal_bl_update_status(bldev);
616 backlight_device_unregister(bldev);
617
618 cancel_delayed_work_sync(&td->esd_work);
619 destroy_workqueue(td->esd_wq);
620
621 kfree(td);
622}
623
624static int taal_enable(struct omap_dss_device *dssdev)
625{
626 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
627 u8 id1, id2, id3;
628 int r;
629
630 dev_dbg(&dssdev->dev, "enable\n");
631
632 if (dssdev->platform_enable) {
633 r = dssdev->platform_enable(dssdev);
634 if (r)
635 return r;
636 }
637
638 /* it seems we have to wait a bit until taal is ready */
639 msleep(5);
640
641 r = taal_sleep_out(td);
642 if (r)
643 goto err;
644
645 r = taal_get_id(&id1, &id2, &id3);
646 if (r)
647 goto err;
648
649 /* on early revisions CABC is broken */
650 if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
651 td->cabc_broken = true;
652
653 taal_dcs_write_1(DCS_BRIGHTNESS, 0xff);
654 taal_dcs_write_1(DCS_CTRL_DISPLAY, (1<<2) | (1<<5)); /* BL | BCTRL */
655
656 taal_dcs_write_1(DCS_PIXEL_FORMAT, 0x7); /* 24bit/pixel */
657
658 taal_set_addr_mode(td->rotate, td->mirror);
659 if (!td->cabc_broken)
660 taal_dcs_write_1(DCS_WRITE_CABC, td->cabc_mode);
661
662 taal_dcs_write_0(DCS_DISPLAY_ON);
663
664#ifdef TAAL_USE_ESD_CHECK
665 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
666#endif
667
668 td->enabled = 1;
669
670 if (!td->intro_printed) {
671 dev_info(&dssdev->dev, "revision %02x.%02x.%02x\n",
672 id1, id2, id3);
673 if (td->cabc_broken)
674 dev_info(&dssdev->dev,
675 "old Taal version, CABC disabled\n");
676 td->intro_printed = true;
677 }
678
679 return 0;
680err:
681 if (dssdev->platform_disable)
682 dssdev->platform_disable(dssdev);
683
684 return r;
685}
686
687static void taal_disable(struct omap_dss_device *dssdev)
688{
689 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
690
691 dev_dbg(&dssdev->dev, "disable\n");
692
693 cancel_delayed_work(&td->esd_work);
694
695 taal_dcs_write_0(DCS_DISPLAY_OFF);
696 taal_sleep_in(td);
697
698 /* wait a bit so that the message goes through */
699 msleep(10);
700
701 if (dssdev->platform_disable)
702 dssdev->platform_disable(dssdev);
703
704 td->enabled = 0;
705}
706
707static int taal_suspend(struct omap_dss_device *dssdev)
708{
709 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
710 struct backlight_device *bldev = td->bldev;
711
712 bldev->props.power = FB_BLANK_POWERDOWN;
713 taal_bl_update_status(bldev);
714
715 return 0;
716}
717
718static int taal_resume(struct omap_dss_device *dssdev)
719{
720 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
721 struct backlight_device *bldev = td->bldev;
722
723 bldev->props.power = FB_BLANK_UNBLANK;
724 taal_bl_update_status(bldev);
725
726 return 0;
727}
728
729static void taal_setup_update(struct omap_dss_device *dssdev,
730 u16 x, u16 y, u16 w, u16 h)
731{
732 taal_set_update_window(x, y, w, h);
733}
734
735static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
736{
737 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
738 int r;
739
740 td->te_enabled = enable;
741
742 if (enable)
743 r = taal_dcs_write_1(DCS_TEAR_ON, 0);
744 else
745 r = taal_dcs_write_0(DCS_TEAR_OFF);
746
747 return r;
748}
749
750static int taal_wait_te(struct omap_dss_device *dssdev)
751{
752 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
753 long wait = msecs_to_jiffies(500);
754
755 if (!td->use_ext_te || !td->te_enabled)
756 return 0;
757
758 INIT_COMPLETION(td->te_completion);
759 wait = wait_for_completion_timeout(&td->te_completion, wait);
760 if (wait == 0) {
761		dev_err(&dssdev->dev, "timeout waiting for TE\n");
762 return -ETIME;
763 }
764
765 return 0;
766}
767
768static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
769{
770 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
771 int r;
772
773 dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
774
775 if (td->enabled) {
776 r = taal_set_addr_mode(rotate, td->mirror);
777
778 if (r)
779 return r;
780 }
781
782 td->rotate = rotate;
783
784 return 0;
785}
786
787static u8 taal_get_rotate(struct omap_dss_device *dssdev)
788{
789 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
790 return td->rotate;
791}
792
793static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
794{
795 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
796 int r;
797
798 dev_dbg(&dssdev->dev, "mirror %d\n", enable);
799
800 if (td->enabled) {
801 r = taal_set_addr_mode(td->rotate, enable);
802
803 if (r)
804 return r;
805 }
806
807 td->mirror = enable;
808
809 return 0;
810}
811
812static bool taal_get_mirror(struct omap_dss_device *dssdev)
813{
814 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
815 return td->mirror;
816}
817
818static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
819{
820 u8 id1, id2, id3;
821 int r;
822
823 r = taal_dcs_read_1(DCS_GET_ID1, &id1);
824 if (r)
825 return r;
826 r = taal_dcs_read_1(DCS_GET_ID2, &id2);
827 if (r)
828 return r;
829 r = taal_dcs_read_1(DCS_GET_ID3, &id3);
830 if (r)
831 return r;
832
833 return 0;
834}
835
836static int taal_memory_read(struct omap_dss_device *dssdev,
837 void *buf, size_t size,
838 u16 x, u16 y, u16 w, u16 h)
839{
840 int r;
841 int first = 1;
842 int plen;
843 unsigned buf_used = 0;
844
845 if (size < w * h * 3)
846 return -ENOMEM;
847
848 size = min(w * h * 3,
849 dssdev->panel.timings.x_res *
850 dssdev->panel.timings.y_res * 3);
851
852	/* plen 1 or 2 goes into a short packet. Until the checksum error is
853	 * fixed, use short packets. plen 32 works, but bigger packets seem to
854	 * cause an error. */
855 if (size % 2)
856 plen = 1;
857 else
858 plen = 2;
859
860 taal_setup_update(dssdev, x, y, w, h);
861
862 r = dsi_vc_set_max_rx_packet_size(TCH, plen);
863 if (r)
864 return r;
865
866 while (buf_used < size) {
867 u8 dcs_cmd = first ? 0x2e : 0x3e;
868 first = 0;
869
870 r = dsi_vc_dcs_read(TCH, dcs_cmd,
871 buf + buf_used, size - buf_used);
872
873 if (r < 0) {
874 dev_err(&dssdev->dev, "read error\n");
875 goto err;
876 }
877
878 buf_used += r;
879
880 if (r < plen) {
881 dev_err(&dssdev->dev, "short read\n");
882 break;
883 }
884
885 if (signal_pending(current)) {
886 dev_err(&dssdev->dev, "signal pending, "
887 "aborting memory read\n");
888 r = -ERESTARTSYS;
889 goto err;
890 }
891 }
892
893 r = buf_used;
894
895err:
896 dsi_vc_set_max_rx_packet_size(TCH, 1);
897
898 return r;
899}
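/*
 * Worked size example (editor's illustration): a full-screen read of
 * the 864x480 24bpp panel needs 864 * 480 * 3 = 1244160 bytes. That
 * size is even, so plen = 2 above, and the data is fetched via DCS
 * 0x2e (read memory start) followed by 0x3e (read memory continue).
 */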
900
901static void taal_esd_work(struct work_struct *work)
902{
903 struct taal_data *td = container_of(work, struct taal_data,
904 esd_work.work);
905 struct omap_dss_device *dssdev = td->dssdev;
906 u8 state1, state2;
907 int r;
908
909 if (!td->enabled)
910 return;
911
912 dsi_bus_lock();
913
914 r = taal_dcs_read_1(DCS_RDDSDR, &state1);
915 if (r) {
916 dev_err(&dssdev->dev, "failed to read Taal status\n");
917 goto err;
918 }
919
920 /* Run self diagnostics */
921 r = taal_sleep_out(td);
922 if (r) {
923 dev_err(&dssdev->dev, "failed to run Taal self-diagnostics\n");
924 goto err;
925 }
926
927 r = taal_dcs_read_1(DCS_RDDSDR, &state2);
928 if (r) {
929 dev_err(&dssdev->dev, "failed to read Taal status\n");
930 goto err;
931 }
932
933 /* Each sleep out command will trigger a self diagnostic and flip
934 * Bit6 if the test passes.
935 */
936 if (!((state1 ^ state2) & (1 << 6))) {
937 dev_err(&dssdev->dev, "LCD self diagnostics failed\n");
938 goto err;
939 }
940 /* Self-diagnostics result is also shown on TE GPIO line. We need
941 * to re-enable TE after self diagnostics */
942 if (td->use_ext_te && td->te_enabled)
943 taal_enable_te(dssdev, true);
944
945 dsi_bus_unlock();
946
947 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
948
949 return;
950err:
951 dev_err(&dssdev->dev, "performing LCD reset\n");
952
953 taal_disable(dssdev);
954 taal_enable(dssdev);
955
956 dsi_bus_unlock();
957
958 queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
959}
960
961static struct omap_dss_driver taal_driver = {
962 .probe = taal_probe,
963 .remove = taal_remove,
964
965 .enable = taal_enable,
966 .disable = taal_disable,
967 .suspend = taal_suspend,
968 .resume = taal_resume,
969
970 .setup_update = taal_setup_update,
971 .enable_te = taal_enable_te,
972 .wait_for_te = taal_wait_te,
973 .set_rotate = taal_rotate,
974 .get_rotate = taal_get_rotate,
975 .set_mirror = taal_mirror,
976 .get_mirror = taal_get_mirror,
977 .run_test = taal_run_test,
978 .memory_read = taal_memory_read,
979
980 .driver = {
981 .name = "taal",
982 .owner = THIS_MODULE,
983 },
984};
985
986static int __init taal_init(void)
987{
988 omap_dss_register_driver(&taal_driver);
989
990 return 0;
991}
992
993static void __exit taal_exit(void)
994{
995 omap_dss_unregister_driver(&taal_driver);
996}
997
998module_init(taal_init);
999module_exit(taal_exit);
1000
1001MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
1002MODULE_DESCRIPTION("Taal Driver");
1003MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
new file mode 100644
index 000000000000..71d8dec30635
--- /dev/null
+++ b/drivers/video/omap2/dss/Kconfig
@@ -0,0 +1,89 @@
1menuconfig OMAP2_DSS
2 tristate "OMAP2/3 Display Subsystem support (EXPERIMENTAL)"
3 depends on ARCH_OMAP2 || ARCH_OMAP3
4 help
5 OMAP2/3 Display Subsystem support.
6
7if OMAP2_DSS
8
9config OMAP2_VRAM_SIZE
10 int "VRAM size (MB)"
11 range 0 32
12 default 0
13 help
14 The amount of SDRAM to reserve at boot time for video RAM use.
15	  This VRAM will be used by omapfb and other drivers that need
16	  a large contiguous RAM area for video use.
17
18	  You can also set this with the "vram=<bytes>" kernel argument, or
19	  in the board file.
20
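# Example (editor's note): booting with "vram=4M" on the kernel
# command line reserves 4 MB for video RAM, assuming the usual
# memparse-style size suffixes are accepted by the vram= parser.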
21config OMAP2_DSS_DEBUG_SUPPORT
22 bool "Debug support"
23 default y
24 help
25 This enables debug messages. You need to enable printing
26 with 'debug' module parameter.
27
28config OMAP2_DSS_RFBI
29 bool "RFBI support"
30 default n
31 help
32 MIPI DBI, or RFBI (Remote Framebuffer Interface), support.
33
34config OMAP2_DSS_VENC
35 bool "VENC support"
36 default y
37 help
38 OMAP Video Encoder support.
39
40config OMAP2_DSS_SDI
41 bool "SDI support"
42 depends on ARCH_OMAP3
43 default n
44 help
45 SDI (Serial Display Interface) support.
46
47config OMAP2_DSS_DSI
48 bool "DSI support"
49 depends on ARCH_OMAP3
50 default n
51 help
52 MIPI DSI support.
53
54config OMAP2_DSS_USE_DSI_PLL
55 bool "Use DSI PLL for PCLK (EXPERIMENTAL)"
56 default n
57 depends on OMAP2_DSS_DSI
58 help
59 Use DSI PLL to generate pixel clock. Currently only for DPI output.
60 DSI PLL can be used to generate higher and more precise pixel clocks.
61
62config OMAP2_DSS_FAKE_VSYNC
63 bool "Fake VSYNC irq from manual update displays"
64 default n
65 help
66 If this is selected, DSI will generate a fake DISPC VSYNC interrupt
67	  when DSI has sent a frame. This is only needed when a DSI or
68	  RFBI display uses manual update mode and you want VSYNC to,
69	  for example, time animation.
70
71config OMAP2_DSS_MIN_FCK_PER_PCK
72 int "Minimum FCK/PCK ratio (for scaling)"
73 range 0 32
74 default 0
75 help
76 This can be used to adjust the minimum FCK/PCK ratio.
77
78 With this you can make sure that DISPC FCK is at least
79	  n x PCK. Video plane scaling requires a higher FCK than
80	  normal.
81
82 If this is set to 0, there's no extra constraint on the
83 DISPC FCK. However, the FCK will at minimum be
84 2xPCK (if active matrix) or 3xPCK (if passive matrix).
85
86	  Max FCK is 173 MHz, so this constraint cannot be met if your
87	  PCK is very high.
88
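# Worked example (editor's arithmetic, using panels from this patch):
#   19.2 MHz PCK * ratio 4 =  76.8 MHz FCK  (satisfiable, < 173 MHz)
#   23.5 MHz PCK * ratio 8 = 188.0 MHz FCK  (impossible, > 173 MHz max)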
89endif
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
new file mode 100644
index 000000000000..980c72c2db98
--- /dev/null
+++ b/drivers/video/omap2/dss/Makefile
@@ -0,0 +1,6 @@
1obj-$(CONFIG_OMAP2_DSS) += omapdss.o
2omapdss-y := core.o dss.o dispc.o dpi.o display.o manager.o overlay.o
3omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
4omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
5omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
6omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
new file mode 100644
index 000000000000..29497a0c9a91
--- /dev/null
+++ b/drivers/video/omap2/dss/core.c
@@ -0,0 +1,919 @@
1/*
2 * linux/drivers/video/omap2/dss/core.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "CORE"
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/clk.h>
28#include <linux/err.h>
29#include <linux/platform_device.h>
30#include <linux/seq_file.h>
31#include <linux/debugfs.h>
32#include <linux/io.h>
33#include <linux/device.h>
34
35#include <plat/display.h>
36#include <plat/clock.h>
37
38#include "dss.h"
39
40static struct {
41 struct platform_device *pdev;
42 int ctx_id;
43
44 struct clk *dss_ick;
45 struct clk *dss1_fck;
46 struct clk *dss2_fck;
47 struct clk *dss_54m_fck;
48 struct clk *dss_96m_fck;
49 unsigned num_clks_enabled;
50} core;
51
52static void dss_clk_enable_all_no_ctx(void);
53static void dss_clk_disable_all_no_ctx(void);
54static void dss_clk_enable_no_ctx(enum dss_clock clks);
55static void dss_clk_disable_no_ctx(enum dss_clock clks);
56
57static char *def_disp_name;
58module_param_named(def_disp, def_disp_name, charp, 0);
59MODULE_PARM_DESC(def_disp, "default display name");
60
61#ifdef DEBUG
62unsigned int dss_debug;
63module_param_named(debug, dss_debug, bool, 0644);
64#endif
65
66/* CONTEXT */
67static int dss_get_ctx_id(void)
68{
69 struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
70 int r;
71
72 if (!pdata->get_last_off_on_transaction_id)
73 return 0;
74 r = pdata->get_last_off_on_transaction_id(&core.pdev->dev);
75 if (r < 0) {
76 dev_err(&core.pdev->dev, "getting transaction ID failed, "
77 "will force context restore\n");
78 r = -1;
79 }
80 return r;
81}
82
83int dss_need_ctx_restore(void)
84{
85 int id = dss_get_ctx_id();
86
87 if (id < 0 || id != core.ctx_id) {
88 DSSDBG("ctx id %d -> id %d\n",
89 core.ctx_id, id);
90 core.ctx_id = id;
91 return 1;
92 } else {
93 return 0;
94 }
95}
96
97static void save_all_ctx(void)
98{
99 DSSDBG("save context\n");
100
101 dss_clk_enable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
102
103 dss_save_context();
104 dispc_save_context();
105#ifdef CONFIG_OMAP2_DSS_DSI
106 dsi_save_context();
107#endif
108
109 dss_clk_disable_no_ctx(DSS_CLK_ICK | DSS_CLK_FCK1);
110}
111
112static void restore_all_ctx(void)
113{
114 DSSDBG("restore context\n");
115
116 dss_clk_enable_all_no_ctx();
117
118 dss_restore_context();
119 dispc_restore_context();
120#ifdef CONFIG_OMAP2_DSS_DSI
121 dsi_restore_context();
122#endif
123
124 dss_clk_disable_all_no_ctx();
125}
126
127/* CLOCKS */
128static void core_dump_clocks(struct seq_file *s)
129{
130 int i;
131 struct clk *clocks[5] = {
132 core.dss_ick,
133 core.dss1_fck,
134 core.dss2_fck,
135 core.dss_54m_fck,
136 core.dss_96m_fck
137 };
138
139 seq_printf(s, "- CORE -\n");
140
141 seq_printf(s, "internal clk count\t\t%u\n", core.num_clks_enabled);
142
143 for (i = 0; i < 5; i++) {
144 if (!clocks[i])
145 continue;
146 seq_printf(s, "%-15s\t%lu\t%d\n",
147 clocks[i]->name,
148 clk_get_rate(clocks[i]),
149 clocks[i]->usecount);
150 }
151}
152
153static int dss_get_clock(struct clk **clock, const char *clk_name)
154{
155 struct clk *clk;
156
157 clk = clk_get(&core.pdev->dev, clk_name);
158
159 if (IS_ERR(clk)) {
160		DSSERR("can't get clock %s\n", clk_name);
161 return PTR_ERR(clk);
162 }
163
164 *clock = clk;
165
166 DSSDBG("clk %s, rate %ld\n", clk_name, clk_get_rate(clk));
167
168 return 0;
169}
170
171static int dss_get_clocks(void)
172{
173 int r;
174
175 core.dss_ick = NULL;
176 core.dss1_fck = NULL;
177 core.dss2_fck = NULL;
178 core.dss_54m_fck = NULL;
179 core.dss_96m_fck = NULL;
180
181 r = dss_get_clock(&core.dss_ick, "ick");
182 if (r)
183 goto err;
184
185 r = dss_get_clock(&core.dss1_fck, "dss1_fck");
186 if (r)
187 goto err;
188
189 r = dss_get_clock(&core.dss2_fck, "dss2_fck");
190 if (r)
191 goto err;
192
193 r = dss_get_clock(&core.dss_54m_fck, "tv_fck");
194 if (r)
195 goto err;
196
197 r = dss_get_clock(&core.dss_96m_fck, "video_fck");
198 if (r)
199 goto err;
200
201 return 0;
202
203err:
204 if (core.dss_ick)
205 clk_put(core.dss_ick);
206 if (core.dss1_fck)
207 clk_put(core.dss1_fck);
208 if (core.dss2_fck)
209 clk_put(core.dss2_fck);
210 if (core.dss_54m_fck)
211 clk_put(core.dss_54m_fck);
212 if (core.dss_96m_fck)
213 clk_put(core.dss_96m_fck);
214
215 return r;
216}
217
218static void dss_put_clocks(void)
219{
220 if (core.dss_96m_fck)
221 clk_put(core.dss_96m_fck);
222 clk_put(core.dss_54m_fck);
223 clk_put(core.dss1_fck);
224 clk_put(core.dss2_fck);
225 clk_put(core.dss_ick);
226}
227
228unsigned long dss_clk_get_rate(enum dss_clock clk)
229{
230 switch (clk) {
231 case DSS_CLK_ICK:
232 return clk_get_rate(core.dss_ick);
233 case DSS_CLK_FCK1:
234 return clk_get_rate(core.dss1_fck);
235 case DSS_CLK_FCK2:
236 return clk_get_rate(core.dss2_fck);
237 case DSS_CLK_54M:
238 return clk_get_rate(core.dss_54m_fck);
239 case DSS_CLK_96M:
240 return clk_get_rate(core.dss_96m_fck);
241 }
242
243 BUG();
244 return 0;
245}
246
247static unsigned count_clk_bits(enum dss_clock clks)
248{
249 unsigned num_clks = 0;
250
251 if (clks & DSS_CLK_ICK)
252 ++num_clks;
253 if (clks & DSS_CLK_FCK1)
254 ++num_clks;
255 if (clks & DSS_CLK_FCK2)
256 ++num_clks;
257 if (clks & DSS_CLK_54M)
258 ++num_clks;
259 if (clks & DSS_CLK_96M)
260 ++num_clks;
261
262 return num_clks;
263}
264
265static void dss_clk_enable_no_ctx(enum dss_clock clks)
266{
267 unsigned num_clks = count_clk_bits(clks);
268
269 if (clks & DSS_CLK_ICK)
270 clk_enable(core.dss_ick);
271 if (clks & DSS_CLK_FCK1)
272 clk_enable(core.dss1_fck);
273 if (clks & DSS_CLK_FCK2)
274 clk_enable(core.dss2_fck);
275 if (clks & DSS_CLK_54M)
276 clk_enable(core.dss_54m_fck);
277 if (clks & DSS_CLK_96M)
278 clk_enable(core.dss_96m_fck);
279
280 core.num_clks_enabled += num_clks;
281}
282
283void dss_clk_enable(enum dss_clock clks)
284{
285 dss_clk_enable_no_ctx(clks);
286
287 if (cpu_is_omap34xx() && dss_need_ctx_restore())
288 restore_all_ctx();
289}
290
291static void dss_clk_disable_no_ctx(enum dss_clock clks)
292{
293 unsigned num_clks = count_clk_bits(clks);
294
295 if (clks & DSS_CLK_ICK)
296 clk_disable(core.dss_ick);
297 if (clks & DSS_CLK_FCK1)
298 clk_disable(core.dss1_fck);
299 if (clks & DSS_CLK_FCK2)
300 clk_disable(core.dss2_fck);
301 if (clks & DSS_CLK_54M)
302 clk_disable(core.dss_54m_fck);
303 if (clks & DSS_CLK_96M)
304 clk_disable(core.dss_96m_fck);
305
306 core.num_clks_enabled -= num_clks;
307}
308
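/*
 * Editor's note: dss_clk_disable() below saves the full DSS register
 * context just before the last enabled clock is turned off (OMAP3
 * only); dss_clk_enable() restores it when dss_need_ctx_restore()
 * reports that the module lost state in between.
 */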
309void dss_clk_disable(enum dss_clock clks)
310{
311 if (cpu_is_omap34xx()) {
312 unsigned num_clks = count_clk_bits(clks);
313
314 BUG_ON(core.num_clks_enabled < num_clks);
315
316 if (core.num_clks_enabled == num_clks)
317 save_all_ctx();
318 }
319
320 dss_clk_disable_no_ctx(clks);
321}
322
323static void dss_clk_enable_all_no_ctx(void)
324{
325 enum dss_clock clks;
326
327 clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
328 if (cpu_is_omap34xx())
329 clks |= DSS_CLK_96M;
330 dss_clk_enable_no_ctx(clks);
331}
332
333static void dss_clk_disable_all_no_ctx(void)
334{
335 enum dss_clock clks;
336
337 clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
338 if (cpu_is_omap34xx())
339 clks |= DSS_CLK_96M;
340 dss_clk_disable_no_ctx(clks);
341}
342
343static void dss_clk_disable_all(void)
344{
345 enum dss_clock clks;
346
347 clks = DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_FCK2 | DSS_CLK_54M;
348 if (cpu_is_omap34xx())
349 clks |= DSS_CLK_96M;
350 dss_clk_disable(clks);
351}
352
353/* DEBUGFS */
354#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
355static void dss_debug_dump_clocks(struct seq_file *s)
356{
357 core_dump_clocks(s);
358 dss_dump_clocks(s);
359 dispc_dump_clocks(s);
360#ifdef CONFIG_OMAP2_DSS_DSI
361 dsi_dump_clocks(s);
362#endif
363}
364
365static int dss_debug_show(struct seq_file *s, void *unused)
366{
367 void (*func)(struct seq_file *) = s->private;
368 func(s);
369 return 0;
370}
371
372static int dss_debug_open(struct inode *inode, struct file *file)
373{
374 return single_open(file, dss_debug_show, inode->i_private);
375}
376
377static const struct file_operations dss_debug_fops = {
378 .open = dss_debug_open,
379 .read = seq_read,
380 .llseek = seq_lseek,
381 .release = single_release,
382};
383
384static struct dentry *dss_debugfs_dir;
385
386static int dss_initialize_debugfs(void)
387{
388 dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
389 if (IS_ERR(dss_debugfs_dir)) {
390 int err = PTR_ERR(dss_debugfs_dir);
391 dss_debugfs_dir = NULL;
392 return err;
393 }
394
395 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
396 &dss_debug_dump_clocks, &dss_debug_fops);
397
398 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
399 &dss_dump_regs, &dss_debug_fops);
400 debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
401 &dispc_dump_regs, &dss_debug_fops);
402#ifdef CONFIG_OMAP2_DSS_RFBI
403 debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
404 &rfbi_dump_regs, &dss_debug_fops);
405#endif
406#ifdef CONFIG_OMAP2_DSS_DSI
407 debugfs_create_file("dsi", S_IRUGO, dss_debugfs_dir,
408 &dsi_dump_regs, &dss_debug_fops);
409#endif
410#ifdef CONFIG_OMAP2_DSS_VENC
411 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
412 &venc_dump_regs, &dss_debug_fops);
413#endif
414 return 0;
415}
416
417static void dss_uninitialize_debugfs(void)
418{
419 if (dss_debugfs_dir)
420 debugfs_remove_recursive(dss_debugfs_dir);
421}
422#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
423
424/* PLATFORM DEVICE */
425static int omap_dss_probe(struct platform_device *pdev)
426{
427 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
428 int skip_init = 0;
429 int r;
430 int i;
431
432 core.pdev = pdev;
433
434 dss_init_overlay_managers(pdev);
435 dss_init_overlays(pdev);
436
437 r = dss_get_clocks();
438 if (r)
439 goto fail0;
440
441 dss_clk_enable_all_no_ctx();
442
443 core.ctx_id = dss_get_ctx_id();
444 DSSDBG("initial ctx id %u\n", core.ctx_id);
445
446#ifdef CONFIG_FB_OMAP_BOOTLOADER_INIT
447 /* DISPC_CONTROL */
448 if (omap_readl(0x48050440) & 1) /* LCD enabled? */
449 skip_init = 1;
450#endif
451
452 r = dss_init(skip_init);
453 if (r) {
454 DSSERR("Failed to initialize DSS\n");
455 goto fail0;
456 }
457
458#ifdef CONFIG_OMAP2_DSS_RFBI
459 r = rfbi_init();
460 if (r) {
461 DSSERR("Failed to initialize rfbi\n");
462 goto fail0;
463 }
464#endif
465
466 r = dpi_init();
467 if (r) {
468 DSSERR("Failed to initialize dpi\n");
469 goto fail0;
470 }
471
472 r = dispc_init();
473 if (r) {
474 DSSERR("Failed to initialize dispc\n");
475 goto fail0;
476 }
477#ifdef CONFIG_OMAP2_DSS_VENC
478 r = venc_init(pdev);
479 if (r) {
480 DSSERR("Failed to initialize venc\n");
481 goto fail0;
482 }
483#endif
484 if (cpu_is_omap34xx()) {
485#ifdef CONFIG_OMAP2_DSS_SDI
486 r = sdi_init(skip_init);
487 if (r) {
488 DSSERR("Failed to initialize SDI\n");
489 goto fail0;
490 }
491#endif
492#ifdef CONFIG_OMAP2_DSS_DSI
493 r = dsi_init(pdev);
494 if (r) {
495 DSSERR("Failed to initialize DSI\n");
496 goto fail0;
497 }
498#endif
499 }
500
501#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
502 r = dss_initialize_debugfs();
503 if (r)
504 goto fail0;
505#endif
506
507 for (i = 0; i < pdata->num_devices; ++i) {
508 struct omap_dss_device *dssdev = pdata->devices[i];
509
510 r = omap_dss_register_device(dssdev);
511 if (r)
512 DSSERR("device reg failed %d\n", i);
513
514 if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
515 pdata->default_device = dssdev;
516 }
517
518 dss_clk_disable_all();
519
520 return 0;
521
522 /* XXX fail correctly */
523fail0:
524 return r;
525}
526
527static int omap_dss_remove(struct platform_device *pdev)
528{
529 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
530 int i;
531 int c;
532
533#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
534 dss_uninitialize_debugfs();
535#endif
536
537#ifdef CONFIG_OMAP2_DSS_VENC
538 venc_exit();
539#endif
540 dispc_exit();
541 dpi_exit();
542#ifdef CONFIG_OMAP2_DSS_RFBI
543 rfbi_exit();
544#endif
545 if (cpu_is_omap34xx()) {
546#ifdef CONFIG_OMAP2_DSS_DSI
547 dsi_exit();
548#endif
549#ifdef CONFIG_OMAP2_DSS_SDI
550 sdi_exit();
551#endif
552 }
553
554 dss_exit();
555
556 /* these should be removed at some point */
557 c = core.dss_ick->usecount;
558 if (c > 0) {
559 DSSERR("warning: dss_ick usecount %d, disabling\n", c);
560 while (c-- > 0)
561 clk_disable(core.dss_ick);
562 }
563
564 c = core.dss1_fck->usecount;
565 if (c > 0) {
566 DSSERR("warning: dss1_fck usecount %d, disabling\n", c);
567 while (c-- > 0)
568 clk_disable(core.dss1_fck);
569 }
570
571 c = core.dss2_fck->usecount;
572 if (c > 0) {
573 DSSERR("warning: dss2_fck usecount %d, disabling\n", c);
574 while (c-- > 0)
575 clk_disable(core.dss2_fck);
576 }
577
578 c = core.dss_54m_fck->usecount;
579 if (c > 0) {
580 DSSERR("warning: dss_54m_fck usecount %d, disabling\n", c);
581 while (c-- > 0)
582 clk_disable(core.dss_54m_fck);
583 }
584
585 if (core.dss_96m_fck) {
586 c = core.dss_96m_fck->usecount;
587 if (c > 0) {
588 DSSERR("warning: dss_96m_fck usecount %d, disabling\n",
589 c);
590 while (c-- > 0)
591 clk_disable(core.dss_96m_fck);
592 }
593 }
594
595 dss_put_clocks();
596
597 dss_uninit_overlays(pdev);
598 dss_uninit_overlay_managers(pdev);
599
600 for (i = 0; i < pdata->num_devices; ++i)
601 omap_dss_unregister_device(pdata->devices[i]);
602
603 return 0;
604}
605
606static void omap_dss_shutdown(struct platform_device *pdev)
607{
608 DSSDBG("shutdown\n");
609 dss_disable_all_devices();
610}
611
612static int omap_dss_suspend(struct platform_device *pdev, pm_message_t state)
613{
614 DSSDBG("suspend %d\n", state.event);
615
616 return dss_suspend_all_devices();
617}
618
619static int omap_dss_resume(struct platform_device *pdev)
620{
621 DSSDBG("resume\n");
622
623 return dss_resume_all_devices();
624}
625
626static struct platform_driver omap_dss_driver = {
627 .probe = omap_dss_probe,
628 .remove = omap_dss_remove,
629 .shutdown = omap_dss_shutdown,
630 .suspend = omap_dss_suspend,
631 .resume = omap_dss_resume,
632 .driver = {
633 .name = "omapdss",
634 .owner = THIS_MODULE,
635 },
636};
637
638/* BUS */
639static int dss_bus_match(struct device *dev, struct device_driver *driver)
640{
641 struct omap_dss_device *dssdev = to_dss_device(dev);
642
643 DSSDBG("bus_match. dev %s/%s, drv %s\n",
644 dev_name(dev), dssdev->driver_name, driver->name);
645
646 return strcmp(dssdev->driver_name, driver->name) == 0;
647}
648
649static ssize_t device_name_show(struct device *dev,
650 struct device_attribute *attr, char *buf)
651{
652 struct omap_dss_device *dssdev = to_dss_device(dev);
653 return snprintf(buf, PAGE_SIZE, "%s\n",
654 dssdev->name ?
655 dssdev->name : "");
656}
657
658static struct device_attribute default_dev_attrs[] = {
659 __ATTR(name, S_IRUGO, device_name_show, NULL),
660 __ATTR_NULL,
661};
662
663static ssize_t driver_name_show(struct device_driver *drv, char *buf)
664{
665 struct omap_dss_driver *dssdrv = to_dss_driver(drv);
666 return snprintf(buf, PAGE_SIZE, "%s\n",
667 dssdrv->driver.name ?
668 dssdrv->driver.name : "");
669}
670static struct driver_attribute default_drv_attrs[] = {
671 __ATTR(name, S_IRUGO, driver_name_show, NULL),
672 __ATTR_NULL,
673};
674
675static struct bus_type dss_bus_type = {
676 .name = "omapdss",
677 .match = dss_bus_match,
678 .dev_attrs = default_dev_attrs,
679 .drv_attrs = default_drv_attrs,
680};
681
682static void dss_bus_release(struct device *dev)
683{
684 DSSDBG("bus_release\n");
685}
686
687static struct device dss_bus = {
688 .release = dss_bus_release,
689};
690
691struct bus_type *dss_get_bus(void)
692{
693 return &dss_bus_type;
694}
695
696/* DRIVER */
697static int dss_driver_probe(struct device *dev)
698{
699 int r;
700 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
701 struct omap_dss_device *dssdev = to_dss_device(dev);
702 struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
703 bool force;
704
705 DSSDBG("driver_probe: dev %s/%s, drv %s\n",
706 dev_name(dev), dssdev->driver_name,
707 dssdrv->driver.name);
708
709 dss_init_device(core.pdev, dssdev);
710
711 /* skip this if the device is behind a ctrl */
712 if (!dssdev->panel.ctrl) {
713 force = pdata->default_device == dssdev;
714 dss_recheck_connections(dssdev, force);
715 }
716
717 r = dssdrv->probe(dssdev);
718
719 if (r) {
720 DSSERR("driver probe failed: %d\n", r);
721 return r;
722 }
723
724 DSSDBG("probe done for device %s\n", dev_name(dev));
725
726 dssdev->driver = dssdrv;
727
728 return 0;
729}
730
731static int dss_driver_remove(struct device *dev)
732{
733 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
734 struct omap_dss_device *dssdev = to_dss_device(dev);
735
736 DSSDBG("driver_remove: dev %s/%s\n", dev_name(dev),
737 dssdev->driver_name);
738
739 dssdrv->remove(dssdev);
740
741 dss_uninit_device(core.pdev, dssdev);
742
743 dssdev->driver = NULL;
744
745 return 0;
746}
747
748int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
749{
750 dssdriver->driver.bus = &dss_bus_type;
751 dssdriver->driver.probe = dss_driver_probe;
752 dssdriver->driver.remove = dss_driver_remove;
753 return driver_register(&dssdriver->driver);
754}
755EXPORT_SYMBOL(omap_dss_register_driver);
756
757void omap_dss_unregister_driver(struct omap_dss_driver *dssdriver)
758{
759 driver_unregister(&dssdriver->driver);
760}
761EXPORT_SYMBOL(omap_dss_unregister_driver);
762
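/*
 * For illustration, a minimal (hypothetical) "acmepanel" driver built
 * on the two exports above; treat it as a sketch, since the full
 * struct omap_dss_driver lives in plat/display.h. Only the hooks this
 * file actually calls are shown, and the driver name must match the
 * dssdev->driver_name that dss_bus_match() compares against.
 *
 *	static int acmepanel_probe(struct omap_dss_device *dssdev)
 *	{
 *		return 0;
 *	}
 *
 *	static void acmepanel_remove(struct omap_dss_device *dssdev)
 *	{
 *	}
 *
 *	static struct omap_dss_driver acmepanel_driver = {
 *		.probe	= acmepanel_probe,
 *		.remove	= acmepanel_remove,
 *		.driver	= {
 *			.name	= "acmepanel",
 *			.owner	= THIS_MODULE,
 *		},
 *	};
 *
 * The panel module calls omap_dss_register_driver(&acmepanel_driver)
 * on init and omap_dss_unregister_driver(&acmepanel_driver) on exit;
 * the embedded device_driver's .bus, .probe and .remove are filled in
 * by omap_dss_register_driver() itself.
 */
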
763/* DEVICE */
764static void reset_device(struct device *dev, int check)
765{
766 u8 *dev_p = (u8 *)dev;
767 u8 *dev_end = dev_p + sizeof(*dev);
768 void *saved_pdata;
769
770 saved_pdata = dev->platform_data;
771 if (check) {
 772		/*
 773		 * Check whether any field other than platform_data is set
 774		 * in struct device; warn that such fields will be discarded
 775		 * by our init.
 776		 */
777 dev->platform_data = NULL;
778 while (dev_p < dev_end) {
779 if (*dev_p) {
 780				WARN(1, "%s: struct device fields will be "
 781						"discarded\n",
 782						__func__);
783 break;
784 }
785 dev_p++;
786 }
787 }
788 memset(dev, 0, sizeof(*dev));
789 dev->platform_data = saved_pdata;
790}
791
792
793static void omap_dss_dev_release(struct device *dev)
794{
795 reset_device(dev, 0);
796}
797
798int omap_dss_register_device(struct omap_dss_device *dssdev)
799{
800 static int dev_num;
801 static int panel_num;
802 int r;
803
804 WARN_ON(!dssdev->driver_name);
805
806 reset_device(&dssdev->dev, 1);
807 dssdev->dev.bus = &dss_bus_type;
808 dssdev->dev.parent = &dss_bus;
809 dssdev->dev.release = omap_dss_dev_release;
810 dev_set_name(&dssdev->dev, "display%d", dev_num++);
811 r = device_register(&dssdev->dev);
812 if (r)
813 return r;
814
815 if (dssdev->ctrl.panel) {
816 struct omap_dss_device *panel = dssdev->ctrl.panel;
817
818 panel->panel.ctrl = dssdev;
819
820 reset_device(&panel->dev, 1);
821 panel->dev.bus = &dss_bus_type;
822 panel->dev.parent = &dssdev->dev;
823 panel->dev.release = omap_dss_dev_release;
824 dev_set_name(&panel->dev, "panel%d", panel_num++);
825 r = device_register(&panel->dev);
826 if (r)
827 return r;
828 }
829
830 return 0;
831}
832
833void omap_dss_unregister_device(struct omap_dss_device *dssdev)
834{
835 device_unregister(&dssdev->dev);
836
837 if (dssdev->ctrl.panel) {
838 struct omap_dss_device *panel = dssdev->ctrl.panel;
839 device_unregister(&panel->dev);
840 }
841}
842
843/* BUS */
844static int omap_dss_bus_register(void)
845{
846 int r;
847
848 r = bus_register(&dss_bus_type);
849 if (r) {
850 DSSERR("bus register failed\n");
851 return r;
852 }
853
854 dev_set_name(&dss_bus, "omapdss");
855 r = device_register(&dss_bus);
856 if (r) {
857 DSSERR("bus driver register failed\n");
858 bus_unregister(&dss_bus_type);
859 return r;
860 }
861
862 return 0;
863}
864
865/* INIT */
866
867#ifdef CONFIG_OMAP2_DSS_MODULE
868static void omap_dss_bus_unregister(void)
869{
870 device_unregister(&dss_bus);
871
872 bus_unregister(&dss_bus_type);
873}
874
875static int __init omap_dss_init(void)
876{
877 int r;
878
879 r = omap_dss_bus_register();
880 if (r)
881 return r;
882
883 r = platform_driver_register(&omap_dss_driver);
884 if (r) {
885 omap_dss_bus_unregister();
886 return r;
887 }
888
889 return 0;
890}
891
892static void __exit omap_dss_exit(void)
893{
894 platform_driver_unregister(&omap_dss_driver);
895
896 omap_dss_bus_unregister();
897}
898
899module_init(omap_dss_init);
900module_exit(omap_dss_exit);
901#else
902static int __init omap_dss_init(void)
903{
904 return omap_dss_bus_register();
905}
906
907static int __init omap_dss_init2(void)
908{
909 return platform_driver_register(&omap_dss_driver);
910}
911
912core_initcall(omap_dss_init);
913device_initcall(omap_dss_init2);
914#endif
915
916MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
917MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
918MODULE_LICENSE("GPL v2");
919
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
new file mode 100644
index 000000000000..6dabf4b2f005
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc.c
@@ -0,0 +1,3091 @@
1/*
2 * linux/drivers/video/omap2/dss/dispc.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "DISPC"
24
25#include <linux/kernel.h>
26#include <linux/dma-mapping.h>
27#include <linux/vmalloc.h>
28#include <linux/clk.h>
29#include <linux/io.h>
30#include <linux/jiffies.h>
31#include <linux/seq_file.h>
32#include <linux/delay.h>
33#include <linux/workqueue.h>
34
35#include <plat/sram.h>
36#include <plat/clock.h>
37
38#include <plat/display.h>
39
40#include "dss.h"
41
42/* DISPC */
43#define DISPC_BASE 0x48050400
44
45#define DISPC_SZ_REGS SZ_1K
46
47struct dispc_reg { u16 idx; };
48
49#define DISPC_REG(idx) ((const struct dispc_reg) { idx })
50
51/* DISPC common */
52#define DISPC_REVISION DISPC_REG(0x0000)
53#define DISPC_SYSCONFIG DISPC_REG(0x0010)
54#define DISPC_SYSSTATUS DISPC_REG(0x0014)
55#define DISPC_IRQSTATUS DISPC_REG(0x0018)
56#define DISPC_IRQENABLE DISPC_REG(0x001C)
57#define DISPC_CONTROL DISPC_REG(0x0040)
58#define DISPC_CONFIG DISPC_REG(0x0044)
59#define DISPC_CAPABLE DISPC_REG(0x0048)
60#define DISPC_DEFAULT_COLOR0 DISPC_REG(0x004C)
61#define DISPC_DEFAULT_COLOR1 DISPC_REG(0x0050)
62#define DISPC_TRANS_COLOR0 DISPC_REG(0x0054)
63#define DISPC_TRANS_COLOR1 DISPC_REG(0x0058)
64#define DISPC_LINE_STATUS DISPC_REG(0x005C)
65#define DISPC_LINE_NUMBER DISPC_REG(0x0060)
66#define DISPC_TIMING_H DISPC_REG(0x0064)
67#define DISPC_TIMING_V DISPC_REG(0x0068)
68#define DISPC_POL_FREQ DISPC_REG(0x006C)
69#define DISPC_DIVISOR DISPC_REG(0x0070)
70#define DISPC_GLOBAL_ALPHA DISPC_REG(0x0074)
71#define DISPC_SIZE_DIG DISPC_REG(0x0078)
72#define DISPC_SIZE_LCD DISPC_REG(0x007C)
73
74/* DISPC GFX plane */
75#define DISPC_GFX_BA0 DISPC_REG(0x0080)
76#define DISPC_GFX_BA1 DISPC_REG(0x0084)
77#define DISPC_GFX_POSITION DISPC_REG(0x0088)
78#define DISPC_GFX_SIZE DISPC_REG(0x008C)
79#define DISPC_GFX_ATTRIBUTES DISPC_REG(0x00A0)
80#define DISPC_GFX_FIFO_THRESHOLD DISPC_REG(0x00A4)
81#define DISPC_GFX_FIFO_SIZE_STATUS DISPC_REG(0x00A8)
82#define DISPC_GFX_ROW_INC DISPC_REG(0x00AC)
83#define DISPC_GFX_PIXEL_INC DISPC_REG(0x00B0)
84#define DISPC_GFX_WINDOW_SKIP DISPC_REG(0x00B4)
85#define DISPC_GFX_TABLE_BA DISPC_REG(0x00B8)
86
87#define DISPC_DATA_CYCLE1 DISPC_REG(0x01D4)
88#define DISPC_DATA_CYCLE2 DISPC_REG(0x01D8)
89#define DISPC_DATA_CYCLE3 DISPC_REG(0x01DC)
90
91#define DISPC_CPR_COEF_R DISPC_REG(0x0220)
92#define DISPC_CPR_COEF_G DISPC_REG(0x0224)
93#define DISPC_CPR_COEF_B DISPC_REG(0x0228)
94
95#define DISPC_GFX_PRELOAD DISPC_REG(0x022C)
96
97/* DISPC Video plane, n = 0 for VID1 and n = 1 for VID2 */
98#define DISPC_VID_REG(n, idx) DISPC_REG(0x00BC + (n)*0x90 + idx)
99
100#define DISPC_VID_BA0(n) DISPC_VID_REG(n, 0x0000)
101#define DISPC_VID_BA1(n) DISPC_VID_REG(n, 0x0004)
102#define DISPC_VID_POSITION(n) DISPC_VID_REG(n, 0x0008)
103#define DISPC_VID_SIZE(n) DISPC_VID_REG(n, 0x000C)
104#define DISPC_VID_ATTRIBUTES(n) DISPC_VID_REG(n, 0x0010)
105#define DISPC_VID_FIFO_THRESHOLD(n) DISPC_VID_REG(n, 0x0014)
106#define DISPC_VID_FIFO_SIZE_STATUS(n) DISPC_VID_REG(n, 0x0018)
107#define DISPC_VID_ROW_INC(n) DISPC_VID_REG(n, 0x001C)
108#define DISPC_VID_PIXEL_INC(n) DISPC_VID_REG(n, 0x0020)
109#define DISPC_VID_FIR(n) DISPC_VID_REG(n, 0x0024)
110#define DISPC_VID_PICTURE_SIZE(n) DISPC_VID_REG(n, 0x0028)
111#define DISPC_VID_ACCU0(n) DISPC_VID_REG(n, 0x002C)
112#define DISPC_VID_ACCU1(n) DISPC_VID_REG(n, 0x0030)
113
114/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
115#define DISPC_VID_FIR_COEF_H(n, i) DISPC_REG(0x00F0 + (n)*0x90 + (i)*0x8)
116/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
117#define DISPC_VID_FIR_COEF_HV(n, i) DISPC_REG(0x00F4 + (n)*0x90 + (i)*0x8)
118/* coef index i = {0, 1, 2, 3, 4} */
119#define DISPC_VID_CONV_COEF(n, i) DISPC_REG(0x0130 + (n)*0x90 + (i)*0x4)
120/* coef index i = {0, 1, 2, 3, 4, 5, 6, 7} */
121#define DISPC_VID_FIR_COEF_V(n, i) DISPC_REG(0x01E0 + (n)*0x20 + (i)*0x4)
122
123#define DISPC_VID_PRELOAD(n) DISPC_REG(0x230 + (n)*0x04)
124
125
126#define DISPC_IRQ_MASK_ERROR (DISPC_IRQ_GFX_FIFO_UNDERFLOW | \
127 DISPC_IRQ_OCP_ERR | \
128 DISPC_IRQ_VID1_FIFO_UNDERFLOW | \
129 DISPC_IRQ_VID2_FIFO_UNDERFLOW | \
130 DISPC_IRQ_SYNC_LOST | \
131 DISPC_IRQ_SYNC_LOST_DIGIT)
132
133#define DISPC_MAX_NR_ISRS 8
134
135struct omap_dispc_isr_data {
136 omap_dispc_isr_t isr;
137 void *arg;
138 u32 mask;
139};
140
141#define REG_GET(idx, start, end) \
142 FLD_GET(dispc_read_reg(idx), start, end)
143
144#define REG_FLD_MOD(idx, val, start, end) \
145 dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
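
/*
 * REG_GET()/REG_FLD_MOD() are bit-field accessors built on
 * dispc_read_reg()/dispc_write_reg() (defined below); REG_FLD_MOD() is
 * a read-modify-write of a single field. For example
 *
 *	REG_FLD_MOD(DISPC_CONTROL, 1, 3, 3);
 *
 * reads DISPC_CONTROL, sets bit 3 (the STN/TFT select used by
 * dispc_set_lcd_display_type()) and writes the word back, leaving
 * every other bit untouched.
 */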
146
147static const struct dispc_reg dispc_reg_att[] = { DISPC_GFX_ATTRIBUTES,
148 DISPC_VID_ATTRIBUTES(0),
149 DISPC_VID_ATTRIBUTES(1) };
150
151static struct {
152 void __iomem *base;
153
154 u32 fifo_size[3];
155
156 spinlock_t irq_lock;
157 u32 irq_error_mask;
158 struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
159 u32 error_irqs;
160 struct work_struct error_work;
161
162 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
163} dispc;
164
165static void _omap_dispc_set_irqs(void);
166
167static inline void dispc_write_reg(const struct dispc_reg idx, u32 val)
168{
169 __raw_writel(val, dispc.base + idx.idx);
170}
171
172static inline u32 dispc_read_reg(const struct dispc_reg idx)
173{
174 return __raw_readl(dispc.base + idx.idx);
175}
176
177#define SR(reg) \
178 dispc.ctx[(DISPC_##reg).idx / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
179#define RR(reg) \
180 dispc_write_reg(DISPC_##reg, dispc.ctx[(DISPC_##reg).idx / sizeof(u32)])
181
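/*
 * SR()/RR() index the context array directly by register offset, so
 * for example SR(CONTROL) expands (roughly) to
 *
 *	dispc.ctx[0x0040 / 4] = dispc_read_reg(DISPC_CONTROL);
 *
 * i.e. each register is saved at ctx[offset / 4], and the array's
 * DISPC_SZ_REGS / sizeof(u32) entries cover the whole 1 KiB register
 * window.
 */
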
182void dispc_save_context(void)
183{
184 if (cpu_is_omap24xx())
185 return;
186
187 SR(SYSCONFIG);
188 SR(IRQENABLE);
189 SR(CONTROL);
190 SR(CONFIG);
191 SR(DEFAULT_COLOR0);
192 SR(DEFAULT_COLOR1);
193 SR(TRANS_COLOR0);
194 SR(TRANS_COLOR1);
195 SR(LINE_NUMBER);
196 SR(TIMING_H);
197 SR(TIMING_V);
198 SR(POL_FREQ);
199 SR(DIVISOR);
200 SR(GLOBAL_ALPHA);
201 SR(SIZE_DIG);
202 SR(SIZE_LCD);
203
204 SR(GFX_BA0);
205 SR(GFX_BA1);
206 SR(GFX_POSITION);
207 SR(GFX_SIZE);
208 SR(GFX_ATTRIBUTES);
209 SR(GFX_FIFO_THRESHOLD);
210 SR(GFX_ROW_INC);
211 SR(GFX_PIXEL_INC);
212 SR(GFX_WINDOW_SKIP);
213 SR(GFX_TABLE_BA);
214
215 SR(DATA_CYCLE1);
216 SR(DATA_CYCLE2);
217 SR(DATA_CYCLE3);
218
219 SR(CPR_COEF_R);
220 SR(CPR_COEF_G);
221 SR(CPR_COEF_B);
222
223 SR(GFX_PRELOAD);
224
225 /* VID1 */
226 SR(VID_BA0(0));
227 SR(VID_BA1(0));
228 SR(VID_POSITION(0));
229 SR(VID_SIZE(0));
230 SR(VID_ATTRIBUTES(0));
231 SR(VID_FIFO_THRESHOLD(0));
232 SR(VID_ROW_INC(0));
233 SR(VID_PIXEL_INC(0));
234 SR(VID_FIR(0));
235 SR(VID_PICTURE_SIZE(0));
236 SR(VID_ACCU0(0));
237 SR(VID_ACCU1(0));
238
239 SR(VID_FIR_COEF_H(0, 0));
240 SR(VID_FIR_COEF_H(0, 1));
241 SR(VID_FIR_COEF_H(0, 2));
242 SR(VID_FIR_COEF_H(0, 3));
243 SR(VID_FIR_COEF_H(0, 4));
244 SR(VID_FIR_COEF_H(0, 5));
245 SR(VID_FIR_COEF_H(0, 6));
246 SR(VID_FIR_COEF_H(0, 7));
247
248 SR(VID_FIR_COEF_HV(0, 0));
249 SR(VID_FIR_COEF_HV(0, 1));
250 SR(VID_FIR_COEF_HV(0, 2));
251 SR(VID_FIR_COEF_HV(0, 3));
252 SR(VID_FIR_COEF_HV(0, 4));
253 SR(VID_FIR_COEF_HV(0, 5));
254 SR(VID_FIR_COEF_HV(0, 6));
255 SR(VID_FIR_COEF_HV(0, 7));
256
257 SR(VID_CONV_COEF(0, 0));
258 SR(VID_CONV_COEF(0, 1));
259 SR(VID_CONV_COEF(0, 2));
260 SR(VID_CONV_COEF(0, 3));
261 SR(VID_CONV_COEF(0, 4));
262
263 SR(VID_FIR_COEF_V(0, 0));
264 SR(VID_FIR_COEF_V(0, 1));
265 SR(VID_FIR_COEF_V(0, 2));
266 SR(VID_FIR_COEF_V(0, 3));
267 SR(VID_FIR_COEF_V(0, 4));
268 SR(VID_FIR_COEF_V(0, 5));
269 SR(VID_FIR_COEF_V(0, 6));
270 SR(VID_FIR_COEF_V(0, 7));
271
272 SR(VID_PRELOAD(0));
273
274 /* VID2 */
275 SR(VID_BA0(1));
276 SR(VID_BA1(1));
277 SR(VID_POSITION(1));
278 SR(VID_SIZE(1));
279 SR(VID_ATTRIBUTES(1));
280 SR(VID_FIFO_THRESHOLD(1));
281 SR(VID_ROW_INC(1));
282 SR(VID_PIXEL_INC(1));
283 SR(VID_FIR(1));
284 SR(VID_PICTURE_SIZE(1));
285 SR(VID_ACCU0(1));
286 SR(VID_ACCU1(1));
287
288 SR(VID_FIR_COEF_H(1, 0));
289 SR(VID_FIR_COEF_H(1, 1));
290 SR(VID_FIR_COEF_H(1, 2));
291 SR(VID_FIR_COEF_H(1, 3));
292 SR(VID_FIR_COEF_H(1, 4));
293 SR(VID_FIR_COEF_H(1, 5));
294 SR(VID_FIR_COEF_H(1, 6));
295 SR(VID_FIR_COEF_H(1, 7));
296
297 SR(VID_FIR_COEF_HV(1, 0));
298 SR(VID_FIR_COEF_HV(1, 1));
299 SR(VID_FIR_COEF_HV(1, 2));
300 SR(VID_FIR_COEF_HV(1, 3));
301 SR(VID_FIR_COEF_HV(1, 4));
302 SR(VID_FIR_COEF_HV(1, 5));
303 SR(VID_FIR_COEF_HV(1, 6));
304 SR(VID_FIR_COEF_HV(1, 7));
305
306 SR(VID_CONV_COEF(1, 0));
307 SR(VID_CONV_COEF(1, 1));
308 SR(VID_CONV_COEF(1, 2));
309 SR(VID_CONV_COEF(1, 3));
310 SR(VID_CONV_COEF(1, 4));
311
312 SR(VID_FIR_COEF_V(1, 0));
313 SR(VID_FIR_COEF_V(1, 1));
314 SR(VID_FIR_COEF_V(1, 2));
315 SR(VID_FIR_COEF_V(1, 3));
316 SR(VID_FIR_COEF_V(1, 4));
317 SR(VID_FIR_COEF_V(1, 5));
318 SR(VID_FIR_COEF_V(1, 6));
319 SR(VID_FIR_COEF_V(1, 7));
320
321 SR(VID_PRELOAD(1));
322}
323
324void dispc_restore_context(void)
325{
326 RR(SYSCONFIG);
327 RR(IRQENABLE);
328 /*RR(CONTROL);*/
329 RR(CONFIG);
330 RR(DEFAULT_COLOR0);
331 RR(DEFAULT_COLOR1);
332 RR(TRANS_COLOR0);
333 RR(TRANS_COLOR1);
334 RR(LINE_NUMBER);
335 RR(TIMING_H);
336 RR(TIMING_V);
337 RR(POL_FREQ);
338 RR(DIVISOR);
339 RR(GLOBAL_ALPHA);
340 RR(SIZE_DIG);
341 RR(SIZE_LCD);
342
343 RR(GFX_BA0);
344 RR(GFX_BA1);
345 RR(GFX_POSITION);
346 RR(GFX_SIZE);
347 RR(GFX_ATTRIBUTES);
348 RR(GFX_FIFO_THRESHOLD);
349 RR(GFX_ROW_INC);
350 RR(GFX_PIXEL_INC);
351 RR(GFX_WINDOW_SKIP);
352 RR(GFX_TABLE_BA);
353
354 RR(DATA_CYCLE1);
355 RR(DATA_CYCLE2);
356 RR(DATA_CYCLE3);
357
358 RR(CPR_COEF_R);
359 RR(CPR_COEF_G);
360 RR(CPR_COEF_B);
361
362 RR(GFX_PRELOAD);
363
364 /* VID1 */
365 RR(VID_BA0(0));
366 RR(VID_BA1(0));
367 RR(VID_POSITION(0));
368 RR(VID_SIZE(0));
369 RR(VID_ATTRIBUTES(0));
370 RR(VID_FIFO_THRESHOLD(0));
371 RR(VID_ROW_INC(0));
372 RR(VID_PIXEL_INC(0));
373 RR(VID_FIR(0));
374 RR(VID_PICTURE_SIZE(0));
375 RR(VID_ACCU0(0));
376 RR(VID_ACCU1(0));
377
378 RR(VID_FIR_COEF_H(0, 0));
379 RR(VID_FIR_COEF_H(0, 1));
380 RR(VID_FIR_COEF_H(0, 2));
381 RR(VID_FIR_COEF_H(0, 3));
382 RR(VID_FIR_COEF_H(0, 4));
383 RR(VID_FIR_COEF_H(0, 5));
384 RR(VID_FIR_COEF_H(0, 6));
385 RR(VID_FIR_COEF_H(0, 7));
386
387 RR(VID_FIR_COEF_HV(0, 0));
388 RR(VID_FIR_COEF_HV(0, 1));
389 RR(VID_FIR_COEF_HV(0, 2));
390 RR(VID_FIR_COEF_HV(0, 3));
391 RR(VID_FIR_COEF_HV(0, 4));
392 RR(VID_FIR_COEF_HV(0, 5));
393 RR(VID_FIR_COEF_HV(0, 6));
394 RR(VID_FIR_COEF_HV(0, 7));
395
396 RR(VID_CONV_COEF(0, 0));
397 RR(VID_CONV_COEF(0, 1));
398 RR(VID_CONV_COEF(0, 2));
399 RR(VID_CONV_COEF(0, 3));
400 RR(VID_CONV_COEF(0, 4));
401
402 RR(VID_FIR_COEF_V(0, 0));
403 RR(VID_FIR_COEF_V(0, 1));
404 RR(VID_FIR_COEF_V(0, 2));
405 RR(VID_FIR_COEF_V(0, 3));
406 RR(VID_FIR_COEF_V(0, 4));
407 RR(VID_FIR_COEF_V(0, 5));
408 RR(VID_FIR_COEF_V(0, 6));
409 RR(VID_FIR_COEF_V(0, 7));
410
411 RR(VID_PRELOAD(0));
412
413 /* VID2 */
414 RR(VID_BA0(1));
415 RR(VID_BA1(1));
416 RR(VID_POSITION(1));
417 RR(VID_SIZE(1));
418 RR(VID_ATTRIBUTES(1));
419 RR(VID_FIFO_THRESHOLD(1));
420 RR(VID_ROW_INC(1));
421 RR(VID_PIXEL_INC(1));
422 RR(VID_FIR(1));
423 RR(VID_PICTURE_SIZE(1));
424 RR(VID_ACCU0(1));
425 RR(VID_ACCU1(1));
426
427 RR(VID_FIR_COEF_H(1, 0));
428 RR(VID_FIR_COEF_H(1, 1));
429 RR(VID_FIR_COEF_H(1, 2));
430 RR(VID_FIR_COEF_H(1, 3));
431 RR(VID_FIR_COEF_H(1, 4));
432 RR(VID_FIR_COEF_H(1, 5));
433 RR(VID_FIR_COEF_H(1, 6));
434 RR(VID_FIR_COEF_H(1, 7));
435
436 RR(VID_FIR_COEF_HV(1, 0));
437 RR(VID_FIR_COEF_HV(1, 1));
438 RR(VID_FIR_COEF_HV(1, 2));
439 RR(VID_FIR_COEF_HV(1, 3));
440 RR(VID_FIR_COEF_HV(1, 4));
441 RR(VID_FIR_COEF_HV(1, 5));
442 RR(VID_FIR_COEF_HV(1, 6));
443 RR(VID_FIR_COEF_HV(1, 7));
444
445 RR(VID_CONV_COEF(1, 0));
446 RR(VID_CONV_COEF(1, 1));
447 RR(VID_CONV_COEF(1, 2));
448 RR(VID_CONV_COEF(1, 3));
449 RR(VID_CONV_COEF(1, 4));
450
451 RR(VID_FIR_COEF_V(1, 0));
452 RR(VID_FIR_COEF_V(1, 1));
453 RR(VID_FIR_COEF_V(1, 2));
454 RR(VID_FIR_COEF_V(1, 3));
455 RR(VID_FIR_COEF_V(1, 4));
456 RR(VID_FIR_COEF_V(1, 5));
457 RR(VID_FIR_COEF_V(1, 6));
458 RR(VID_FIR_COEF_V(1, 7));
459
460 RR(VID_PRELOAD(1));
461
462 /* enable last, because LCD & DIGIT enable are here */
463 RR(CONTROL);
464}
465
466#undef SR
467#undef RR
468
469static inline void enable_clocks(bool enable)
470{
471 if (enable)
472 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
473 else
474 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
475}
476
477bool dispc_go_busy(enum omap_channel channel)
478{
479 int bit;
480
481 if (channel == OMAP_DSS_CHANNEL_LCD)
482 bit = 5; /* GOLCD */
483 else
484 bit = 6; /* GODIGIT */
485
486 return REG_GET(DISPC_CONTROL, bit, bit) == 1;
487}
488
489void dispc_go(enum omap_channel channel)
490{
491 int bit;
492
493 enable_clocks(1);
494
495 if (channel == OMAP_DSS_CHANNEL_LCD)
496 bit = 0; /* LCDENABLE */
497 else
498 bit = 1; /* DIGITALENABLE */
499
500 /* if the channel is not enabled, we don't need GO */
501 if (REG_GET(DISPC_CONTROL, bit, bit) == 0)
502 goto end;
503
504 if (channel == OMAP_DSS_CHANNEL_LCD)
505 bit = 5; /* GOLCD */
506 else
507 bit = 6; /* GODIGIT */
508
509 if (REG_GET(DISPC_CONTROL, bit, bit) == 1) {
510 DSSERR("GO bit not down for channel %d\n", channel);
511 goto end;
512 }
513
514 DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : "DIGIT");
515
516 REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit);
517end:
518 enable_clocks(0);
519}
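
/*
 * A sketch of the intended GO handshake (that shadow-register writes
 * are taken into use at the start of the next frame, and that the
 * hardware then clears GO, is per the DISPC documentation rather than
 * this file):
 *
 *	_dispc_setup_plane(...);		   program shadow registers
 *	dispc_go(OMAP_DSS_CHANNEL_LCD);		   request latching
 *	while (dispc_go_busy(OMAP_DSS_CHANNEL_LCD))
 *		cpu_relax();			   cleared by HW when taken
 */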
520
521static void _dispc_write_firh_reg(enum omap_plane plane, int reg, u32 value)
522{
523 BUG_ON(plane == OMAP_DSS_GFX);
524
525 dispc_write_reg(DISPC_VID_FIR_COEF_H(plane-1, reg), value);
526}
527
528static void _dispc_write_firhv_reg(enum omap_plane plane, int reg, u32 value)
529{
530 BUG_ON(plane == OMAP_DSS_GFX);
531
532 dispc_write_reg(DISPC_VID_FIR_COEF_HV(plane-1, reg), value);
533}
534
535static void _dispc_write_firv_reg(enum omap_plane plane, int reg, u32 value)
536{
537 BUG_ON(plane == OMAP_DSS_GFX);
538
539 dispc_write_reg(DISPC_VID_FIR_COEF_V(plane-1, reg), value);
540}
541
542static void _dispc_set_scale_coef(enum omap_plane plane, int hscaleup,
543 int vscaleup, int five_taps)
544{
545 /* Coefficients for horizontal up-sampling */
546 static const u32 coef_hup[8] = {
547 0x00800000,
548 0x0D7CF800,
549 0x1E70F5FF,
550 0x335FF5FE,
551 0xF74949F7,
552 0xF55F33FB,
553 0xF5701EFE,
554 0xF87C0DFF,
555 };
556
557 /* Coefficients for horizontal down-sampling */
558 static const u32 coef_hdown[8] = {
559 0x24382400,
560 0x28371FFE,
561 0x2C361BFB,
562 0x303516F9,
563 0x11343311,
564 0x1635300C,
565 0x1B362C08,
566 0x1F372804,
567 };
568
569 /* Coefficients for horizontal and vertical up-sampling */
570 static const u32 coef_hvup[2][8] = {
571 {
572 0x00800000,
573 0x037B02FF,
574 0x0C6F05FE,
575 0x205907FB,
576 0x00404000,
577 0x075920FE,
578 0x056F0CFF,
579 0x027B0300,
580 },
581 {
582 0x00800000,
583 0x0D7CF8FF,
584 0x1E70F5FE,
585 0x335FF5FB,
586 0xF7404000,
587 0xF55F33FE,
588 0xF5701EFF,
589 0xF87C0D00,
590 },
591 };
592
593 /* Coefficients for horizontal and vertical down-sampling */
594 static const u32 coef_hvdown[2][8] = {
595 {
596 0x24382400,
597 0x28391F04,
598 0x2D381B08,
599 0x3237170C,
600 0x123737F7,
601 0x173732F9,
602 0x1B382DFB,
603 0x1F3928FE,
604 },
605 {
606 0x24382400,
607 0x28371F04,
608 0x2C361B08,
609 0x3035160C,
610 0x113433F7,
611 0x163530F9,
612 0x1B362CFB,
613 0x1F3728FE,
614 },
615 };
616
617 /* Coefficients for vertical up-sampling */
618 static const u32 coef_vup[8] = {
619 0x00000000,
620 0x0000FF00,
621 0x0000FEFF,
622 0x0000FBFE,
623 0x000000F7,
624 0x0000FEFB,
625 0x0000FFFE,
626 0x000000FF,
627 };
628
629
630 /* Coefficients for vertical down-sampling */
631 static const u32 coef_vdown[8] = {
632 0x00000000,
633 0x000004FE,
634 0x000008FB,
635 0x00000CF9,
636 0x0000F711,
637 0x0000F90C,
638 0x0000FB08,
639 0x0000FE04,
640 };
641
642 const u32 *h_coef;
643 const u32 *hv_coef;
644 const u32 *hv_coef_mod;
645 const u32 *v_coef;
646 int i;
647
648 if (hscaleup)
649 h_coef = coef_hup;
650 else
651 h_coef = coef_hdown;
652
653 if (vscaleup) {
654 hv_coef = coef_hvup[five_taps];
655 v_coef = coef_vup;
656
657 if (hscaleup)
658 hv_coef_mod = NULL;
659 else
660 hv_coef_mod = coef_hvdown[five_taps];
661 } else {
662 hv_coef = coef_hvdown[five_taps];
663 v_coef = coef_vdown;
664
665 if (hscaleup)
666 hv_coef_mod = coef_hvup[five_taps];
667 else
668 hv_coef_mod = NULL;
669 }
670
671 for (i = 0; i < 8; i++) {
672 u32 h, hv;
673
674 h = h_coef[i];
675
676 hv = hv_coef[i];
677
678 if (hv_coef_mod) {
679 hv &= 0xffffff00;
680 hv |= (hv_coef_mod[i] & 0xff);
681 }
682
683 _dispc_write_firh_reg(plane, i, h);
684 _dispc_write_firhv_reg(plane, i, hv);
685 }
686
687 if (!five_taps)
688 return;
689
690 for (i = 0; i < 8; i++) {
691 u32 v;
692 v = v_coef[i];
693 _dispc_write_firv_reg(plane, i, v);
694 }
695}
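
/*
 * A note on the register layout the tables above assume (taken from
 * the DISPC functional description, not from this file): each
 * FIR_COEF_H word packs four signed 8-bit horizontal taps HC3..HC0,
 * e.g.
 *
 *	0x0D7CF800  ->  HC3 = 13, HC2 = 124, HC1 = -8, HC0 = 0
 *
 * with each phase's taps summing to roughly 128 (unity gain in 1/128
 * steps). The low byte of a FIR_COEF_HV word is the fifth horizontal
 * tap, which is why the loop above splices in hv_coef_mod[i] & 0xff
 * when the horizontal and vertical scaling directions differ.
 */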
696
697static void _dispc_setup_color_conv_coef(void)
698{
699 const struct color_conv_coef {
700 int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
701 int full_range;
702 } ctbl_bt601_5 = {
703 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
704 };
705
706 const struct color_conv_coef *ct;
707
708#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
709
710 ct = &ctbl_bt601_5;
711
712 dispc_write_reg(DISPC_VID_CONV_COEF(0, 0), CVAL(ct->rcr, ct->ry));
713 dispc_write_reg(DISPC_VID_CONV_COEF(0, 1), CVAL(ct->gy, ct->rcb));
714 dispc_write_reg(DISPC_VID_CONV_COEF(0, 2), CVAL(ct->gcb, ct->gcr));
715 dispc_write_reg(DISPC_VID_CONV_COEF(0, 3), CVAL(ct->bcr, ct->by));
716 dispc_write_reg(DISPC_VID_CONV_COEF(0, 4), CVAL(0, ct->bcb));
717
718 dispc_write_reg(DISPC_VID_CONV_COEF(1, 0), CVAL(ct->rcr, ct->ry));
719 dispc_write_reg(DISPC_VID_CONV_COEF(1, 1), CVAL(ct->gy, ct->rcb));
720 dispc_write_reg(DISPC_VID_CONV_COEF(1, 2), CVAL(ct->gcb, ct->gcr));
721 dispc_write_reg(DISPC_VID_CONV_COEF(1, 3), CVAL(ct->bcr, ct->by));
722 dispc_write_reg(DISPC_VID_CONV_COEF(1, 4), CVAL(0, ct->bcb));
723
724#undef CVAL
725
726 REG_FLD_MOD(DISPC_VID_ATTRIBUTES(0), ct->full_range, 11, 11);
727 REG_FLD_MOD(DISPC_VID_ATTRIBUTES(1), ct->full_range, 11, 11);
728}
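
/*
 * The ctbl_bt601_5 values above are the standard BT.601 YCbCr -> RGB
 * coefficients scaled by 256 (1.164, 1.596, -0.391, -0.813, 2.018).
 * Assuming the usual >>8 fixed-point evaluation (a property of the
 * hardware, not stated in this file), the conversion is
 *
 *	R = (298 * Y' + 409 * Cr'            ) >> 8
 *	G = (298 * Y' - 100 * Cb' - 208 * Cr') >> 8
 *	B = (298 * Y' + 517 * Cb'            ) >> 8
 *
 * with Y' = Y - 16 and Cb'/Cr' = Cb/Cr - 128 for the limited-range
 * input selected by full_range = 0.
 */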
729
730
731static void _dispc_set_plane_ba0(enum omap_plane plane, u32 paddr)
732{
733 const struct dispc_reg ba0_reg[] = { DISPC_GFX_BA0,
734 DISPC_VID_BA0(0),
735 DISPC_VID_BA0(1) };
736
737 dispc_write_reg(ba0_reg[plane], paddr);
738}
739
740static void _dispc_set_plane_ba1(enum omap_plane plane, u32 paddr)
741{
742 const struct dispc_reg ba1_reg[] = { DISPC_GFX_BA1,
743 DISPC_VID_BA1(0),
744 DISPC_VID_BA1(1) };
745
746 dispc_write_reg(ba1_reg[plane], paddr);
747}
748
749static void _dispc_set_plane_pos(enum omap_plane plane, int x, int y)
750{
751 const struct dispc_reg pos_reg[] = { DISPC_GFX_POSITION,
752 DISPC_VID_POSITION(0),
753 DISPC_VID_POSITION(1) };
754
755 u32 val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
756 dispc_write_reg(pos_reg[plane], val);
757}
758
759static void _dispc_set_pic_size(enum omap_plane plane, int width, int height)
760{
761 const struct dispc_reg siz_reg[] = { DISPC_GFX_SIZE,
762 DISPC_VID_PICTURE_SIZE(0),
763 DISPC_VID_PICTURE_SIZE(1) };
764 u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
765 dispc_write_reg(siz_reg[plane], val);
766}
767
768static void _dispc_set_vid_size(enum omap_plane plane, int width, int height)
769{
770 u32 val;
771 const struct dispc_reg vsi_reg[] = { DISPC_VID_SIZE(0),
772 DISPC_VID_SIZE(1) };
773
774 BUG_ON(plane == OMAP_DSS_GFX);
775
776 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
777 dispc_write_reg(vsi_reg[plane-1], val);
778}
779
780static void _dispc_setup_global_alpha(enum omap_plane plane, u8 global_alpha)
781{
782
783 BUG_ON(plane == OMAP_DSS_VIDEO1);
784
785 if (cpu_is_omap24xx())
786 return;
787
788 if (plane == OMAP_DSS_GFX)
789 REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 7, 0);
790 else if (plane == OMAP_DSS_VIDEO2)
791 REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, 23, 16);
792}
793
794static void _dispc_set_pix_inc(enum omap_plane plane, s32 inc)
795{
796 const struct dispc_reg ri_reg[] = { DISPC_GFX_PIXEL_INC,
797 DISPC_VID_PIXEL_INC(0),
798 DISPC_VID_PIXEL_INC(1) };
799
800 dispc_write_reg(ri_reg[plane], inc);
801}
802
803static void _dispc_set_row_inc(enum omap_plane plane, s32 inc)
804{
805 const struct dispc_reg ri_reg[] = { DISPC_GFX_ROW_INC,
806 DISPC_VID_ROW_INC(0),
807 DISPC_VID_ROW_INC(1) };
808
809 dispc_write_reg(ri_reg[plane], inc);
810}
811
812static void _dispc_set_color_mode(enum omap_plane plane,
813 enum omap_color_mode color_mode)
814{
815 u32 m = 0;
816
817 switch (color_mode) {
818 case OMAP_DSS_COLOR_CLUT1:
819 m = 0x0; break;
820 case OMAP_DSS_COLOR_CLUT2:
821 m = 0x1; break;
822 case OMAP_DSS_COLOR_CLUT4:
823 m = 0x2; break;
824 case OMAP_DSS_COLOR_CLUT8:
825 m = 0x3; break;
826 case OMAP_DSS_COLOR_RGB12U:
827 m = 0x4; break;
828 case OMAP_DSS_COLOR_ARGB16:
829 m = 0x5; break;
830 case OMAP_DSS_COLOR_RGB16:
831 m = 0x6; break;
832 case OMAP_DSS_COLOR_RGB24U:
833 m = 0x8; break;
834 case OMAP_DSS_COLOR_RGB24P:
835 m = 0x9; break;
836 case OMAP_DSS_COLOR_YUV2:
837 m = 0xa; break;
838 case OMAP_DSS_COLOR_UYVY:
839 m = 0xb; break;
840 case OMAP_DSS_COLOR_ARGB32:
841 m = 0xc; break;
842 case OMAP_DSS_COLOR_RGBA32:
843 m = 0xd; break;
844 case OMAP_DSS_COLOR_RGBX32:
845 m = 0xe; break;
846 default:
847 BUG(); break;
848 }
849
850 REG_FLD_MOD(dispc_reg_att[plane], m, 4, 1);
851}
852
853static void _dispc_set_channel_out(enum omap_plane plane,
854 enum omap_channel channel)
855{
856 int shift;
857 u32 val;
858
859 switch (plane) {
860 case OMAP_DSS_GFX:
861 shift = 8;
862 break;
863 case OMAP_DSS_VIDEO1:
864 case OMAP_DSS_VIDEO2:
865 shift = 16;
866 break;
867 default:
868 BUG();
869 return;
870 }
871
872 val = dispc_read_reg(dispc_reg_att[plane]);
873 val = FLD_MOD(val, channel, shift, shift);
874 dispc_write_reg(dispc_reg_att[plane], val);
875}
876
877void dispc_set_burst_size(enum omap_plane plane,
878 enum omap_burst_size burst_size)
879{
880 int shift;
881 u32 val;
882
883 enable_clocks(1);
884
885 switch (plane) {
886 case OMAP_DSS_GFX:
887 shift = 6;
888 break;
889 case OMAP_DSS_VIDEO1:
890 case OMAP_DSS_VIDEO2:
891 shift = 14;
892 break;
893 default:
894 BUG();
895 return;
896 }
897
898 val = dispc_read_reg(dispc_reg_att[plane]);
899 val = FLD_MOD(val, burst_size, shift+1, shift);
900 dispc_write_reg(dispc_reg_att[plane], val);
901
902 enable_clocks(0);
903}
904
905static void _dispc_set_vid_color_conv(enum omap_plane plane, bool enable)
906{
907 u32 val;
908
909 BUG_ON(plane == OMAP_DSS_GFX);
910
911 val = dispc_read_reg(dispc_reg_att[plane]);
912 val = FLD_MOD(val, enable, 9, 9);
913 dispc_write_reg(dispc_reg_att[plane], val);
914}
915
916void dispc_enable_replication(enum omap_plane plane, bool enable)
917{
918 int bit;
919
920 if (plane == OMAP_DSS_GFX)
921 bit = 5;
922 else
923 bit = 10;
924
925 enable_clocks(1);
926 REG_FLD_MOD(dispc_reg_att[plane], enable, bit, bit);
927 enable_clocks(0);
928}
929
930void dispc_set_lcd_size(u16 width, u16 height)
931{
932 u32 val;
933 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
934 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
935 enable_clocks(1);
936 dispc_write_reg(DISPC_SIZE_LCD, val);
937 enable_clocks(0);
938}
939
940void dispc_set_digit_size(u16 width, u16 height)
941{
942 u32 val;
943 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
944 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
945 enable_clocks(1);
946 dispc_write_reg(DISPC_SIZE_DIG, val);
947 enable_clocks(0);
948}
949
950static void dispc_read_plane_fifo_sizes(void)
951{
952 const struct dispc_reg fsz_reg[] = { DISPC_GFX_FIFO_SIZE_STATUS,
953 DISPC_VID_FIFO_SIZE_STATUS(0),
954 DISPC_VID_FIFO_SIZE_STATUS(1) };
955 u32 size;
956 int plane;
957
958 enable_clocks(1);
959
960 for (plane = 0; plane < ARRAY_SIZE(dispc.fifo_size); ++plane) {
961 if (cpu_is_omap24xx())
962 size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 8, 0);
963 else if (cpu_is_omap34xx())
964 size = FLD_GET(dispc_read_reg(fsz_reg[plane]), 10, 0);
965 else
966 BUG();
967
968 dispc.fifo_size[plane] = size;
969 }
970
971 enable_clocks(0);
972}
973
974u32 dispc_get_plane_fifo_size(enum omap_plane plane)
975{
976 return dispc.fifo_size[plane];
977}
978
979void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high)
980{
981 const struct dispc_reg ftrs_reg[] = { DISPC_GFX_FIFO_THRESHOLD,
982 DISPC_VID_FIFO_THRESHOLD(0),
983 DISPC_VID_FIFO_THRESHOLD(1) };
984 enable_clocks(1);
985
986 DSSDBG("fifo(%d) low/high old %u/%u, new %u/%u\n",
987 plane,
988 REG_GET(ftrs_reg[plane], 11, 0),
989 REG_GET(ftrs_reg[plane], 27, 16),
990 low, high);
991
992 if (cpu_is_omap24xx())
993 dispc_write_reg(ftrs_reg[plane],
994 FLD_VAL(high, 24, 16) | FLD_VAL(low, 8, 0));
995 else
996 dispc_write_reg(ftrs_reg[plane],
997 FLD_VAL(high, 27, 16) | FLD_VAL(low, 11, 0));
998
999 enable_clocks(0);
1000}
1001
1002void dispc_enable_fifomerge(bool enable)
1003{
1004 enable_clocks(1);
1005
1006 DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
1007 REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
1008
1009 enable_clocks(0);
1010}
1011
1012static void _dispc_set_fir(enum omap_plane plane, int hinc, int vinc)
1013{
1014 u32 val;
1015 const struct dispc_reg fir_reg[] = { DISPC_VID_FIR(0),
1016 DISPC_VID_FIR(1) };
1017
1018 BUG_ON(plane == OMAP_DSS_GFX);
1019
1020 if (cpu_is_omap24xx())
1021 val = FLD_VAL(vinc, 27, 16) | FLD_VAL(hinc, 11, 0);
1022 else
1023 val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
1024 dispc_write_reg(fir_reg[plane-1], val);
1025}
1026
1027static void _dispc_set_vid_accu0(enum omap_plane plane, int haccu, int vaccu)
1028{
1029 u32 val;
1030 const struct dispc_reg ac0_reg[] = { DISPC_VID_ACCU0(0),
1031 DISPC_VID_ACCU0(1) };
1032
1033 BUG_ON(plane == OMAP_DSS_GFX);
1034
1035 val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
1036 dispc_write_reg(ac0_reg[plane-1], val);
1037}
1038
1039static void _dispc_set_vid_accu1(enum omap_plane plane, int haccu, int vaccu)
1040{
1041 u32 val;
1042 const struct dispc_reg ac1_reg[] = { DISPC_VID_ACCU1(0),
1043 DISPC_VID_ACCU1(1) };
1044
1045 BUG_ON(plane == OMAP_DSS_GFX);
1046
1047 val = FLD_VAL(vaccu, 25, 16) | FLD_VAL(haccu, 9, 0);
1048 dispc_write_reg(ac1_reg[plane-1], val);
1049}
1050
1051
1052static void _dispc_set_scaling(enum omap_plane plane,
1053 u16 orig_width, u16 orig_height,
1054 u16 out_width, u16 out_height,
1055 bool ilace, bool five_taps,
1056 bool fieldmode)
1057{
1058 int fir_hinc;
1059 int fir_vinc;
1060 int hscaleup, vscaleup;
1061 int accu0 = 0;
1062 int accu1 = 0;
1063 u32 l;
1064
1065 BUG_ON(plane == OMAP_DSS_GFX);
1066
1067 hscaleup = orig_width <= out_width;
1068 vscaleup = orig_height <= out_height;
1069
1070 _dispc_set_scale_coef(plane, hscaleup, vscaleup, five_taps);
1071
1072 if (!orig_width || orig_width == out_width)
1073 fir_hinc = 0;
1074 else
1075 fir_hinc = 1024 * orig_width / out_width;
1076
1077 if (!orig_height || orig_height == out_height)
1078 fir_vinc = 0;
1079 else
1080 fir_vinc = 1024 * orig_height / out_height;
1081
1082 _dispc_set_fir(plane, fir_hinc, fir_vinc);
1083
1084 l = dispc_read_reg(dispc_reg_att[plane]);
1085 l &= ~((0x0f << 5) | (0x3 << 21));
1086
1087 l |= fir_hinc ? (1 << 5) : 0;
1088 l |= fir_vinc ? (1 << 6) : 0;
1089
1090 l |= hscaleup ? 0 : (1 << 7);
1091 l |= vscaleup ? 0 : (1 << 8);
1092
1093 l |= five_taps ? (1 << 21) : 0;
1094 l |= five_taps ? (1 << 22) : 0;
1095
1096 dispc_write_reg(dispc_reg_att[plane], l);
1097
1098 /*
1099 * field 0 = even field = bottom field
1100 * field 1 = odd field = top field
1101 */
1102 if (ilace && !fieldmode) {
1103 accu1 = 0;
1104 accu0 = (fir_vinc / 2) & 0x3ff;
1105 if (accu0 >= 1024/2) {
1106 accu1 = 1024/2;
1107 accu0 -= accu1;
1108 }
1109 }
1110
1111 _dispc_set_vid_accu0(plane, 0, accu0);
1112 _dispc_set_vid_accu1(plane, 0, accu1);
1113}
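
/*
 * Worked example for the FIR increments above: fir_hinc/fir_vinc are
 * the source step per output pixel in 1/1024 units, so downscaling an
 * 800 pixel wide source to 400 output pixels gives
 *
 *	fir_hinc = 1024 * 800 / 400 = 2048
 *
 * i.e. two source pixels per output pixel. 1:1 scaling is encoded as
 * 0, which also leaves the resize-enable bits (5 and 6 of the
 * attributes word) cleared.
 */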
1114
1115static void _dispc_set_rotation_attrs(enum omap_plane plane, u8 rotation,
1116 bool mirroring, enum omap_color_mode color_mode)
1117{
1118 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1119 color_mode == OMAP_DSS_COLOR_UYVY) {
1120 int vidrot = 0;
1121
1122 if (mirroring) {
1123 switch (rotation) {
1124 case OMAP_DSS_ROT_0:
1125 vidrot = 2;
1126 break;
1127 case OMAP_DSS_ROT_90:
1128 vidrot = 1;
1129 break;
1130 case OMAP_DSS_ROT_180:
1131 vidrot = 0;
1132 break;
1133 case OMAP_DSS_ROT_270:
1134 vidrot = 3;
1135 break;
1136 }
1137 } else {
1138 switch (rotation) {
1139 case OMAP_DSS_ROT_0:
1140 vidrot = 0;
1141 break;
1142 case OMAP_DSS_ROT_90:
1143 vidrot = 1;
1144 break;
1145 case OMAP_DSS_ROT_180:
1146 vidrot = 2;
1147 break;
1148 case OMAP_DSS_ROT_270:
1149 vidrot = 3;
1150 break;
1151 }
1152 }
1153
1154 REG_FLD_MOD(dispc_reg_att[plane], vidrot, 13, 12);
1155
1156 if (rotation == OMAP_DSS_ROT_90 || rotation == OMAP_DSS_ROT_270)
1157 REG_FLD_MOD(dispc_reg_att[plane], 0x1, 18, 18);
1158 else
1159 REG_FLD_MOD(dispc_reg_att[plane], 0x0, 18, 18);
1160 } else {
1161 REG_FLD_MOD(dispc_reg_att[plane], 0, 13, 12);
1162 REG_FLD_MOD(dispc_reg_att[plane], 0, 18, 18);
1163 }
1164}
1165
1166static int color_mode_to_bpp(enum omap_color_mode color_mode)
1167{
1168 switch (color_mode) {
1169 case OMAP_DSS_COLOR_CLUT1:
1170 return 1;
1171 case OMAP_DSS_COLOR_CLUT2:
1172 return 2;
1173 case OMAP_DSS_COLOR_CLUT4:
1174 return 4;
1175 case OMAP_DSS_COLOR_CLUT8:
1176 return 8;
1177 case OMAP_DSS_COLOR_RGB12U:
1178 case OMAP_DSS_COLOR_RGB16:
1179 case OMAP_DSS_COLOR_ARGB16:
1180 case OMAP_DSS_COLOR_YUV2:
1181 case OMAP_DSS_COLOR_UYVY:
1182 return 16;
1183 case OMAP_DSS_COLOR_RGB24P:
1184 return 24;
1185 case OMAP_DSS_COLOR_RGB24U:
1186 case OMAP_DSS_COLOR_ARGB32:
1187 case OMAP_DSS_COLOR_RGBA32:
1188 case OMAP_DSS_COLOR_RGBX32:
1189 return 32;
1190 default:
1191 BUG();
1192 }
1193}
1194
1195static s32 pixinc(int pixels, u8 ps)
1196{
1197 if (pixels == 1)
1198 return 1;
1199 else if (pixels > 1)
1200 return 1 + (pixels - 1) * ps;
1201 else if (pixels < 0)
1202 return 1 - (-pixels + 1) * ps;
1203 else
1204 BUG();
1205}
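
/*
 * pixinc() turns a step of 'pixels' (each 'ps' bytes wide) into the
 * DMA increment encoding, where 1 means "advance one byte". For
 * ps = 2 (e.g. RGB16):
 *
 *	pixinc(1, 2)   ->  1	next pixel
 *	pixinc(2, 2)   ->  3	skip every other pixel
 *	pixinc(-1, 2)  -> -3	previous pixel
 */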
1206
1207static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1208 u16 screen_width,
1209 u16 width, u16 height,
1210 enum omap_color_mode color_mode, bool fieldmode,
1211 unsigned int field_offset,
1212 unsigned *offset0, unsigned *offset1,
1213 s32 *row_inc, s32 *pix_inc)
1214{
1215 u8 ps;
1216
1217 /* FIXME CLUT formats */
1218 switch (color_mode) {
1219 case OMAP_DSS_COLOR_CLUT1:
1220 case OMAP_DSS_COLOR_CLUT2:
1221 case OMAP_DSS_COLOR_CLUT4:
1222 case OMAP_DSS_COLOR_CLUT8:
1223 BUG();
1224 return;
1225 case OMAP_DSS_COLOR_YUV2:
1226 case OMAP_DSS_COLOR_UYVY:
1227 ps = 4;
1228 break;
1229 default:
1230 ps = color_mode_to_bpp(color_mode) / 8;
1231 break;
1232 }
1233
1234 DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
1235 width, height);
1236
1237 /*
1238 * field 0 = even field = bottom field
1239 * field 1 = odd field = top field
1240 */
1241 switch (rotation + mirror * 4) {
1242 case OMAP_DSS_ROT_0:
1243 case OMAP_DSS_ROT_180:
 1244		/*
 1245		 * If the pixel format is YUV2 or UYVY, halve the width for 0 and
 1246		 * 180 degree rotation, then fall through to the offset setup below.
 1247		 */
1248 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1249 color_mode == OMAP_DSS_COLOR_UYVY)
1250 width = width >> 1;
1251 case OMAP_DSS_ROT_90:
1252 case OMAP_DSS_ROT_270:
1253 *offset1 = 0;
1254 if (field_offset)
1255 *offset0 = field_offset * screen_width * ps;
1256 else
1257 *offset0 = 0;
1258
1259 *row_inc = pixinc(1 + (screen_width - width) +
1260 (fieldmode ? screen_width : 0),
1261 ps);
1262 *pix_inc = pixinc(1, ps);
1263 break;
1264
1265 case OMAP_DSS_ROT_0 + 4:
1266 case OMAP_DSS_ROT_180 + 4:
 1267		/* If the pixel format is YUV2 or UYVY, halve the width for 0 and
 1268		 * 180 degree mirroring, then fall through to the offset setup below.
 1269		 */
1270 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1271 color_mode == OMAP_DSS_COLOR_UYVY)
1272 width = width >> 1;
1273 case OMAP_DSS_ROT_90 + 4:
1274 case OMAP_DSS_ROT_270 + 4:
1275 *offset1 = 0;
1276 if (field_offset)
1277 *offset0 = field_offset * screen_width * ps;
1278 else
1279 *offset0 = 0;
1280 *row_inc = pixinc(1 - (screen_width + width) -
1281 (fieldmode ? screen_width : 0),
1282 ps);
1283 *pix_inc = pixinc(1, ps);
1284 break;
1285
1286 default:
1287 BUG();
1288 }
1289}
1290
1291static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1292 u16 screen_width,
1293 u16 width, u16 height,
1294 enum omap_color_mode color_mode, bool fieldmode,
1295 unsigned int field_offset,
1296 unsigned *offset0, unsigned *offset1,
1297 s32 *row_inc, s32 *pix_inc)
1298{
1299 u8 ps;
1300 u16 fbw, fbh;
1301
1302 /* FIXME CLUT formats */
1303 switch (color_mode) {
1304 case OMAP_DSS_COLOR_CLUT1:
1305 case OMAP_DSS_COLOR_CLUT2:
1306 case OMAP_DSS_COLOR_CLUT4:
1307 case OMAP_DSS_COLOR_CLUT8:
1308 BUG();
1309 return;
1310 default:
1311 ps = color_mode_to_bpp(color_mode) / 8;
1312 break;
1313 }
1314
1315 DSSDBG("calc_rot(%d): scrw %d, %dx%d\n", rotation, screen_width,
1316 width, height);
1317
1318 /* width & height are overlay sizes, convert to fb sizes */
1319
1320 if (rotation == OMAP_DSS_ROT_0 || rotation == OMAP_DSS_ROT_180) {
1321 fbw = width;
1322 fbh = height;
1323 } else {
1324 fbw = height;
1325 fbh = width;
1326 }
1327
1328 /*
1329 * field 0 = even field = bottom field
1330 * field 1 = odd field = top field
1331 */
1332 switch (rotation + mirror * 4) {
1333 case OMAP_DSS_ROT_0:
1334 *offset1 = 0;
1335 if (field_offset)
1336 *offset0 = *offset1 + field_offset * screen_width * ps;
1337 else
1338 *offset0 = *offset1;
1339 *row_inc = pixinc(1 + (screen_width - fbw) +
1340 (fieldmode ? screen_width : 0),
1341 ps);
1342 *pix_inc = pixinc(1, ps);
1343 break;
1344 case OMAP_DSS_ROT_90:
1345 *offset1 = screen_width * (fbh - 1) * ps;
1346 if (field_offset)
1347 *offset0 = *offset1 + field_offset * ps;
1348 else
1349 *offset0 = *offset1;
1350 *row_inc = pixinc(screen_width * (fbh - 1) + 1 +
1351 (fieldmode ? 1 : 0), ps);
1352 *pix_inc = pixinc(-screen_width, ps);
1353 break;
1354 case OMAP_DSS_ROT_180:
1355 *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
1356 if (field_offset)
1357 *offset0 = *offset1 - field_offset * screen_width * ps;
1358 else
1359 *offset0 = *offset1;
1360 *row_inc = pixinc(-1 -
1361 (screen_width - fbw) -
1362 (fieldmode ? screen_width : 0),
1363 ps);
1364 *pix_inc = pixinc(-1, ps);
1365 break;
1366 case OMAP_DSS_ROT_270:
1367 *offset1 = (fbw - 1) * ps;
1368 if (field_offset)
1369 *offset0 = *offset1 - field_offset * ps;
1370 else
1371 *offset0 = *offset1;
1372 *row_inc = pixinc(-screen_width * (fbh - 1) - 1 -
1373 (fieldmode ? 1 : 0), ps);
1374 *pix_inc = pixinc(screen_width, ps);
1375 break;
1376
1377 /* mirroring */
1378 case OMAP_DSS_ROT_0 + 4:
1379 *offset1 = (fbw - 1) * ps;
1380 if (field_offset)
1381 *offset0 = *offset1 + field_offset * screen_width * ps;
1382 else
1383 *offset0 = *offset1;
1384 *row_inc = pixinc(screen_width * 2 - 1 +
1385 (fieldmode ? screen_width : 0),
1386 ps);
1387 *pix_inc = pixinc(-1, ps);
1388 break;
1389
1390 case OMAP_DSS_ROT_90 + 4:
1391 *offset1 = 0;
1392 if (field_offset)
1393 *offset0 = *offset1 + field_offset * ps;
1394 else
1395 *offset0 = *offset1;
1396 *row_inc = pixinc(-screen_width * (fbh - 1) + 1 +
1397 (fieldmode ? 1 : 0),
1398 ps);
1399 *pix_inc = pixinc(screen_width, ps);
1400 break;
1401
1402 case OMAP_DSS_ROT_180 + 4:
1403 *offset1 = screen_width * (fbh - 1) * ps;
1404 if (field_offset)
1405 *offset0 = *offset1 - field_offset * screen_width * ps;
1406 else
1407 *offset0 = *offset1;
1408 *row_inc = pixinc(1 - screen_width * 2 -
1409 (fieldmode ? screen_width : 0),
1410 ps);
1411 *pix_inc = pixinc(1, ps);
1412 break;
1413
1414 case OMAP_DSS_ROT_270 + 4:
1415 *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
1416 if (field_offset)
1417 *offset0 = *offset1 - field_offset * ps;
1418 else
1419 *offset0 = *offset1;
1420 *row_inc = pixinc(screen_width * (fbh - 1) - 1 -
1421 (fieldmode ? 1 : 0),
1422 ps);
1423 *pix_inc = pixinc(-screen_width, ps);
1424 break;
1425
1426 default:
1427 BUG();
1428 }
1429}
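
/*
 * Worked example for the ROT_90 case above (no mirroring, no fields):
 * for a 240 x 320 framebuffer (fbw x fbh) with screen_width = 240 and
 * ps = 2 (RGB16), scanout starts at the beginning of the last line:
 *
 *	offset1 = 240 * 319 * 2 bytes,
 *	pix_inc = pixinc(-240, 2)		one fb line up per pixel,
 *	row_inc = pixinc(240 * 319 + 1, 2)	back to the bottom line,
 *						one pixel to the right,
 *
 * so each output row is a bottom-to-top column of the framebuffer.
 */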
1430
1431static unsigned long calc_fclk_five_taps(u16 width, u16 height,
1432 u16 out_width, u16 out_height, enum omap_color_mode color_mode)
1433{
1434 u32 fclk = 0;
1435 /* FIXME venc pclk? */
1436 u64 tmp, pclk = dispc_pclk_rate();
1437
1438 if (height > out_height) {
1439 /* FIXME get real display PPL */
1440 unsigned int ppl = 800;
1441
1442 tmp = pclk * height * out_width;
1443 do_div(tmp, 2 * out_height * ppl);
1444 fclk = tmp;
1445
1446 if (height > 2 * out_height && ppl != out_width) {
1447 tmp = pclk * (height - 2 * out_height) * out_width;
1448 do_div(tmp, 2 * out_height * (ppl - out_width));
1449 fclk = max(fclk, (u32) tmp);
1450 }
1451 }
1452
1453 if (width > out_width) {
1454 tmp = pclk * width;
1455 do_div(tmp, out_width);
1456 fclk = max(fclk, (u32) tmp);
1457
1458 if (color_mode == OMAP_DSS_COLOR_RGB24U)
1459 fclk <<= 1;
1460 }
1461
1462 return fclk;
1463}
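
/*
 * Example for calc_fclk_five_taps(), with pixel clock p: an 800x600
 * source scaled to 400x300 on a display with ppl = 800 gives
 *
 *	vertical term:	 p * 600 * 400 / (2 * 300 * 800) = p / 2
 *	horizontal term: p * 800 / 400                   = 2 * p
 *
 * so the 5-tap path needs fclk >= 2 * p (doubled once more for
 * RGB24U, per the fclk <<= 1 above).
 */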
1464
1465static unsigned long calc_fclk(u16 width, u16 height,
1466 u16 out_width, u16 out_height)
1467{
1468 unsigned int hf, vf;
1469
1470 /*
1471 * FIXME how to determine the 'A' factor
1472 * for the no downscaling case ?
1473 */
1474
1475 if (width > 3 * out_width)
1476 hf = 4;
1477 else if (width > 2 * out_width)
1478 hf = 3;
1479 else if (width > out_width)
1480 hf = 2;
1481 else
1482 hf = 1;
1483
1484 if (height > out_height)
1485 vf = 2;
1486 else
1487 vf = 1;
1488
1489 /* FIXME venc pclk? */
1490 return dispc_pclk_rate() * vf * hf;
1491}
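
/*
 * Example for calc_fclk(): downscaling 800x600 to 400x300 gives
 * hf = 2 (800 > 400 but not > 1600) and vf = 2 (600 > 300), so the
 * required functional clock is 4 x pclk: four input pixels must be
 * fetched per output pixel time.
 */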
1492
1493void dispc_set_channel_out(enum omap_plane plane, enum omap_channel channel_out)
1494{
1495 enable_clocks(1);
1496 _dispc_set_channel_out(plane, channel_out);
1497 enable_clocks(0);
1498}
1499
1500static int _dispc_setup_plane(enum omap_plane plane,
1501 u32 paddr, u16 screen_width,
1502 u16 pos_x, u16 pos_y,
1503 u16 width, u16 height,
1504 u16 out_width, u16 out_height,
1505 enum omap_color_mode color_mode,
1506 bool ilace,
1507 enum omap_dss_rotation_type rotation_type,
1508 u8 rotation, int mirror,
1509 u8 global_alpha)
1510{
1511 const int maxdownscale = cpu_is_omap34xx() ? 4 : 2;
1512 bool five_taps = 0;
1513 bool fieldmode = 0;
1514 int cconv = 0;
1515 unsigned offset0, offset1;
1516 s32 row_inc;
1517 s32 pix_inc;
1518 u16 frame_height = height;
1519 unsigned int field_offset = 0;
1520
1521 if (paddr == 0)
1522 return -EINVAL;
1523
1524 if (ilace && height == out_height)
1525 fieldmode = 1;
1526
1527 if (ilace) {
1528 if (fieldmode)
1529 height /= 2;
1530 pos_y /= 2;
1531 out_height /= 2;
1532
1533 DSSDBG("adjusting for ilace: height %d, pos_y %d, "
1534 "out_height %d\n",
1535 height, pos_y, out_height);
1536 }
1537
1538 if (plane == OMAP_DSS_GFX) {
1539 if (width != out_width || height != out_height)
1540 return -EINVAL;
1541
1542 switch (color_mode) {
1543 case OMAP_DSS_COLOR_ARGB16:
1544 case OMAP_DSS_COLOR_ARGB32:
1545 case OMAP_DSS_COLOR_RGBA32:
1546 case OMAP_DSS_COLOR_RGBX32:
1547 if (cpu_is_omap24xx())
1548 return -EINVAL;
1549 /* fall through */
1550 case OMAP_DSS_COLOR_RGB12U:
1551 case OMAP_DSS_COLOR_RGB16:
1552 case OMAP_DSS_COLOR_RGB24P:
1553 case OMAP_DSS_COLOR_RGB24U:
1554 break;
1555
1556 default:
1557 return -EINVAL;
1558 }
1559 } else {
1560 /* video plane */
1561
1562 unsigned long fclk = 0;
1563
1564 if (out_width < width / maxdownscale ||
1565 out_width > width * 8)
1566 return -EINVAL;
1567
1568 if (out_height < height / maxdownscale ||
1569 out_height > height * 8)
1570 return -EINVAL;
1571
1572 switch (color_mode) {
1573 case OMAP_DSS_COLOR_RGBX32:
1574 case OMAP_DSS_COLOR_RGB12U:
1575 if (cpu_is_omap24xx())
1576 return -EINVAL;
1577 /* fall through */
1578 case OMAP_DSS_COLOR_RGB16:
1579 case OMAP_DSS_COLOR_RGB24P:
1580 case OMAP_DSS_COLOR_RGB24U:
1581 break;
1582
1583 case OMAP_DSS_COLOR_ARGB16:
1584 case OMAP_DSS_COLOR_ARGB32:
1585 case OMAP_DSS_COLOR_RGBA32:
1586 if (cpu_is_omap24xx())
1587 return -EINVAL;
1588 if (plane == OMAP_DSS_VIDEO1)
1589 return -EINVAL;
1590 break;
1591
1592 case OMAP_DSS_COLOR_YUV2:
1593 case OMAP_DSS_COLOR_UYVY:
1594 cconv = 1;
1595 break;
1596
1597 default:
1598 return -EINVAL;
1599 }
1600
1601 /* Must use 5-tap filter? */
1602 five_taps = height > out_height * 2;
1603
1604 if (!five_taps) {
1605 fclk = calc_fclk(width, height,
1606 out_width, out_height);
1607
1608 /* Try 5-tap filter if 3-tap fclk is too high */
1609 if (cpu_is_omap34xx() && height > out_height &&
1610 fclk > dispc_fclk_rate())
1611 five_taps = true;
1612 }
1613
1614 if (width > (2048 >> five_taps)) {
 1615			DSSERR("failed to set up scaling, width exceeds max line width\n");
1616 return -EINVAL;
1617 }
1618
1619 if (five_taps)
1620 fclk = calc_fclk_five_taps(width, height,
1621 out_width, out_height, color_mode);
1622
1623 DSSDBG("required fclk rate = %lu Hz\n", fclk);
1624 DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
1625
1626 if (fclk > dispc_fclk_rate()) {
1627 DSSERR("failed to set up scaling, "
1628 "required fclk rate = %lu Hz, "
1629 "current fclk rate = %lu Hz\n",
1630 fclk, dispc_fclk_rate());
1631 return -EINVAL;
1632 }
1633 }
1634
1635 if (ilace && !fieldmode) {
1636 /*
 1637		 * When downscaling, the bottom field may have to start several
1638 * source lines below the top field. Unfortunately ACCUI
1639 * registers will only hold the fractional part of the offset
1640 * so the integer part must be added to the base address of the
1641 * bottom field.
1642 */
1643 if (!height || height == out_height)
1644 field_offset = 0;
1645 else
1646 field_offset = height / out_height / 2;
1647 }
1648
1649 /* Fields are independent but interleaved in memory. */
1650 if (fieldmode)
1651 field_offset = 1;
1652
1653 if (rotation_type == OMAP_DSS_ROT_DMA)
1654 calc_dma_rotation_offset(rotation, mirror,
1655 screen_width, width, frame_height, color_mode,
1656 fieldmode, field_offset,
1657 &offset0, &offset1, &row_inc, &pix_inc);
1658 else
1659 calc_vrfb_rotation_offset(rotation, mirror,
1660 screen_width, width, frame_height, color_mode,
1661 fieldmode, field_offset,
1662 &offset0, &offset1, &row_inc, &pix_inc);
1663
1664 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
1665 offset0, offset1, row_inc, pix_inc);
1666
1667 _dispc_set_color_mode(plane, color_mode);
1668
1669 _dispc_set_plane_ba0(plane, paddr + offset0);
1670 _dispc_set_plane_ba1(plane, paddr + offset1);
1671
1672 _dispc_set_row_inc(plane, row_inc);
1673 _dispc_set_pix_inc(plane, pix_inc);
1674
1675 DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, width, height,
1676 out_width, out_height);
1677
1678 _dispc_set_plane_pos(plane, pos_x, pos_y);
1679
1680 _dispc_set_pic_size(plane, width, height);
1681
1682 if (plane != OMAP_DSS_GFX) {
1683 _dispc_set_scaling(plane, width, height,
1684 out_width, out_height,
1685 ilace, five_taps, fieldmode);
1686 _dispc_set_vid_size(plane, out_width, out_height);
1687 _dispc_set_vid_color_conv(plane, cconv);
1688 }
1689
1690 _dispc_set_rotation_attrs(plane, rotation, mirror, color_mode);
1691
1692 if (plane != OMAP_DSS_VIDEO1)
1693 _dispc_setup_global_alpha(plane, global_alpha);
1694
1695 return 0;
1696}
1697
1698static void _dispc_enable_plane(enum omap_plane plane, bool enable)
1699{
1700 REG_FLD_MOD(dispc_reg_att[plane], enable ? 1 : 0, 0, 0);
1701}
1702
1703static void dispc_disable_isr(void *data, u32 mask)
1704{
1705 struct completion *compl = data;
1706 complete(compl);
1707}
1708
1709static void _enable_lcd_out(bool enable)
1710{
1711 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
1712}
1713
1714void dispc_enable_lcd_out(bool enable)
1715{
1716 struct completion frame_done_completion;
1717 bool is_on;
1718 int r;
1719
1720 enable_clocks(1);
1721
 1722	/* When we disable the LCD output we need to wait until the frame is
 1723	 * done. Otherwise the DSS is still working, and turning off the
 1724	 * clocks prevents the DSS from entering OFF mode. */
1725 is_on = REG_GET(DISPC_CONTROL, 0, 0);
1726
1727 if (!enable && is_on) {
1728 init_completion(&frame_done_completion);
1729
1730 r = omap_dispc_register_isr(dispc_disable_isr,
1731 &frame_done_completion,
1732 DISPC_IRQ_FRAMEDONE);
1733
1734 if (r)
1735 DSSERR("failed to register FRAMEDONE isr\n");
1736 }
1737
1738 _enable_lcd_out(enable);
1739
1740 if (!enable && is_on) {
1741 if (!wait_for_completion_timeout(&frame_done_completion,
1742 msecs_to_jiffies(100)))
1743 DSSERR("timeout waiting for FRAME DONE\n");
1744
1745 r = omap_dispc_unregister_isr(dispc_disable_isr,
1746 &frame_done_completion,
1747 DISPC_IRQ_FRAMEDONE);
1748
1749 if (r)
1750 DSSERR("failed to unregister FRAMEDONE isr\n");
1751 }
1752
1753 enable_clocks(0);
1754}
1755
1756static void _enable_digit_out(bool enable)
1757{
1758 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
1759}
1760
1761void dispc_enable_digit_out(bool enable)
1762{
1763 struct completion frame_done_completion;
1764 int r;
1765
1766 enable_clocks(1);
1767
1768 if (REG_GET(DISPC_CONTROL, 1, 1) == enable) {
1769 enable_clocks(0);
1770 return;
1771 }
1772
1773 if (enable) {
1774 unsigned long flags;
 1775		/* When we enable the digit output we'll get an extra digit
 1776		 * sync lost interrupt that we need to ignore */
1777 spin_lock_irqsave(&dispc.irq_lock, flags);
1778 dispc.irq_error_mask &= ~DISPC_IRQ_SYNC_LOST_DIGIT;
1779 _omap_dispc_set_irqs();
1780 spin_unlock_irqrestore(&dispc.irq_lock, flags);
1781 }
1782
 1783	/* When we disable the digit output we need to wait until both fields
 1784	 * are done. Otherwise the DSS is still working, and turning off the
 1785	 * clocks prevents the DSS from entering OFF mode. When enabling, we
 1786	 * also need to wait out the extra sync lost interrupts. */
1787 init_completion(&frame_done_completion);
1788
1789 r = omap_dispc_register_isr(dispc_disable_isr, &frame_done_completion,
1790 DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
1791 if (r)
1792 DSSERR("failed to register EVSYNC isr\n");
1793
1794 _enable_digit_out(enable);
1795
1796 /* XXX I understand from TRM that we should only wait for the
1797 * current field to complete. But it seems we have to wait
1798 * for both fields */
1799 if (!wait_for_completion_timeout(&frame_done_completion,
1800 msecs_to_jiffies(100)))
1801 DSSERR("timeout waiting for EVSYNC\n");
1802
1803 if (!wait_for_completion_timeout(&frame_done_completion,
1804 msecs_to_jiffies(100)))
1805 DSSERR("timeout waiting for EVSYNC\n");
1806
1807 r = omap_dispc_unregister_isr(dispc_disable_isr,
1808 &frame_done_completion,
1809 DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD);
1810 if (r)
1811 DSSERR("failed to unregister EVSYNC isr\n");
1812
1813 if (enable) {
1814 unsigned long flags;
1815 spin_lock_irqsave(&dispc.irq_lock, flags);
1816 dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
1817 dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT);
1818 _omap_dispc_set_irqs();
1819 spin_unlock_irqrestore(&dispc.irq_lock, flags);
1820 }
1821
1822 enable_clocks(0);
1823}
1824
1825void dispc_lcd_enable_signal_polarity(bool act_high)
1826{
1827 enable_clocks(1);
1828 REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
1829 enable_clocks(0);
1830}
1831
1832void dispc_lcd_enable_signal(bool enable)
1833{
1834 enable_clocks(1);
1835 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
1836 enable_clocks(0);
1837}
1838
1839void dispc_pck_free_enable(bool enable)
1840{
1841 enable_clocks(1);
1842 REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
1843 enable_clocks(0);
1844}
1845
1846void dispc_enable_fifohandcheck(bool enable)
1847{
1848 enable_clocks(1);
1849 REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 16, 16);
1850 enable_clocks(0);
1851}
1852
1853
1854void dispc_set_lcd_display_type(enum omap_lcd_display_type type)
1855{
1856 int mode;
1857
1858 switch (type) {
1859 case OMAP_DSS_LCD_DISPLAY_STN:
1860 mode = 0;
1861 break;
1862
1863 case OMAP_DSS_LCD_DISPLAY_TFT:
1864 mode = 1;
1865 break;
1866
1867 default:
1868 BUG();
1869 return;
1870 }
1871
1872 enable_clocks(1);
1873 REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3);
1874 enable_clocks(0);
1875}
1876
1877void dispc_set_loadmode(enum omap_dss_load_mode mode)
1878{
1879 enable_clocks(1);
1880 REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
1881 enable_clocks(0);
1882}
1883
1884
1885void dispc_set_default_color(enum omap_channel channel, u32 color)
1886{
1887 const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
1888 DISPC_DEFAULT_COLOR1 };
1889
1890 enable_clocks(1);
1891 dispc_write_reg(def_reg[channel], color);
1892 enable_clocks(0);
1893}
1894
1895u32 dispc_get_default_color(enum omap_channel channel)
1896{
1897 const struct dispc_reg def_reg[] = { DISPC_DEFAULT_COLOR0,
1898 DISPC_DEFAULT_COLOR1 };
1899 u32 l;
1900
1901 BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
1902 channel != OMAP_DSS_CHANNEL_LCD);
1903
1904 enable_clocks(1);
1905 l = dispc_read_reg(def_reg[channel]);
1906 enable_clocks(0);
1907
1908 return l;
1909}
1910
1911void dispc_set_trans_key(enum omap_channel ch,
1912 enum omap_dss_trans_key_type type,
1913 u32 trans_key)
1914{
1915 const struct dispc_reg tr_reg[] = {
1916 DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
1917
1918 enable_clocks(1);
1919 if (ch == OMAP_DSS_CHANNEL_LCD)
1920 REG_FLD_MOD(DISPC_CONFIG, type, 11, 11);
1921 else /* OMAP_DSS_CHANNEL_DIGIT */
1922 REG_FLD_MOD(DISPC_CONFIG, type, 13, 13);
1923
1924 dispc_write_reg(tr_reg[ch], trans_key);
1925 enable_clocks(0);
1926}
1927
1928void dispc_get_trans_key(enum omap_channel ch,
1929 enum omap_dss_trans_key_type *type,
1930 u32 *trans_key)
1931{
1932 const struct dispc_reg tr_reg[] = {
1933 DISPC_TRANS_COLOR0, DISPC_TRANS_COLOR1 };
1934
1935 enable_clocks(1);
1936 if (type) {
1937 if (ch == OMAP_DSS_CHANNEL_LCD)
1938 *type = REG_GET(DISPC_CONFIG, 11, 11);
1939 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
1940 *type = REG_GET(DISPC_CONFIG, 13, 13);
1941 else
1942 BUG();
1943 }
1944
1945 if (trans_key)
1946 *trans_key = dispc_read_reg(tr_reg[ch]);
1947 enable_clocks(0);
1948}
1949
1950void dispc_enable_trans_key(enum omap_channel ch, bool enable)
1951{
1952 enable_clocks(1);
1953 if (ch == OMAP_DSS_CHANNEL_LCD)
1954 REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
1955 else /* OMAP_DSS_CHANNEL_DIGIT */
1956 REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12);
1957 enable_clocks(0);
1958}

 1959void dispc_enable_alpha_blending(enum omap_channel ch, bool enable)
1960{
1961 if (cpu_is_omap24xx())
1962 return;
1963
1964 enable_clocks(1);
1965 if (ch == OMAP_DSS_CHANNEL_LCD)
1966 REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
1967 else /* OMAP_DSS_CHANNEL_DIGIT */
1968 REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
1969 enable_clocks(0);
1970}

 1971bool dispc_alpha_blending_enabled(enum omap_channel ch)
1972{
1973 bool enabled;
1974
1975 if (cpu_is_omap24xx())
1976 return false;
1977
1978 enable_clocks(1);
1979 if (ch == OMAP_DSS_CHANNEL_LCD)
1980 enabled = REG_GET(DISPC_CONFIG, 18, 18);
1981 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
 1982		enabled = REG_GET(DISPC_CONFIG, 19, 19);
1983 else
1984 BUG();
1985 enable_clocks(0);
1986
1987 return enabled;
 1989}
1990
1991
1992bool dispc_trans_key_enabled(enum omap_channel ch)
1993{
1994 bool enabled;
1995
1996 enable_clocks(1);
1997 if (ch == OMAP_DSS_CHANNEL_LCD)
1998 enabled = REG_GET(DISPC_CONFIG, 10, 10);
1999 else if (ch == OMAP_DSS_CHANNEL_DIGIT)
2000 enabled = REG_GET(DISPC_CONFIG, 12, 12);
2001 else
2002 BUG();
2003 enable_clocks(0);
2004
2005 return enabled;
2006}
2007
2008
2009void dispc_set_tft_data_lines(u8 data_lines)
2010{
2011 int code;
2012
2013 switch (data_lines) {
2014 case 12:
2015 code = 0;
2016 break;
2017 case 16:
2018 code = 1;
2019 break;
2020 case 18:
2021 code = 2;
2022 break;
2023 case 24:
2024 code = 3;
2025 break;
2026 default:
2027 BUG();
2028 return;
2029 }
2030
2031 enable_clocks(1);
2032 REG_FLD_MOD(DISPC_CONTROL, code, 9, 8);
2033 enable_clocks(0);
2034}
2035
2036void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode)
2037{
2038 u32 l;
2039 int stallmode;
2040 int gpout0 = 1;
2041 int gpout1;
2042
2043 switch (mode) {
2044 case OMAP_DSS_PARALLELMODE_BYPASS:
2045 stallmode = 0;
2046 gpout1 = 1;
2047 break;
2048
2049 case OMAP_DSS_PARALLELMODE_RFBI:
2050 stallmode = 1;
2051 gpout1 = 0;
2052 break;
2053
2054 case OMAP_DSS_PARALLELMODE_DSI:
2055 stallmode = 1;
2056 gpout1 = 1;
2057 break;
2058
2059 default:
2060 BUG();
2061 return;
2062 }
2063
2064 enable_clocks(1);
2065
2066 l = dispc_read_reg(DISPC_CONTROL);
2067
2068 l = FLD_MOD(l, stallmode, 11, 11);
2069 l = FLD_MOD(l, gpout0, 15, 15);
2070 l = FLD_MOD(l, gpout1, 16, 16);
2071
2072 dispc_write_reg(DISPC_CONTROL, l);
2073
2074 enable_clocks(0);
2075}
2076
2077static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
2078 int vsw, int vfp, int vbp)
2079{
2080 if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
2081 if (hsw < 1 || hsw > 64 ||
2082 hfp < 1 || hfp > 256 ||
2083 hbp < 1 || hbp > 256 ||
2084 vsw < 1 || vsw > 64 ||
2085 vfp < 0 || vfp > 255 ||
2086 vbp < 0 || vbp > 255)
2087 return false;
2088 } else {
2089 if (hsw < 1 || hsw > 256 ||
2090 hfp < 1 || hfp > 4096 ||
2091 hbp < 1 || hbp > 4096 ||
2092 vsw < 1 || vsw > 256 ||
2093 vfp < 0 || vfp > 4095 ||
2094 vbp < 0 || vbp > 4095)
2095 return false;
2096 }
2097
2098 return true;
2099}
2100
2101bool dispc_lcd_timings_ok(struct omap_video_timings *timings)
2102{
2103 return _dispc_lcd_timings_ok(timings->hsw, timings->hfp,
2104 timings->hbp, timings->vsw,
2105 timings->vfp, timings->vbp);
2106}
2107
2108static void _dispc_set_lcd_timings(int hsw, int hfp, int hbp,
2109 int vsw, int vfp, int vbp)
2110{
2111 u32 timing_h, timing_v;
2112
2113 if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) {
2114 timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) |
2115 FLD_VAL(hbp-1, 27, 20);
2116
2117 timing_v = FLD_VAL(vsw-1, 5, 0) | FLD_VAL(vfp, 15, 8) |
2118 FLD_VAL(vbp, 27, 20);
2119 } else {
2120 timing_h = FLD_VAL(hsw-1, 7, 0) | FLD_VAL(hfp-1, 19, 8) |
2121 FLD_VAL(hbp-1, 31, 20);
2122
2123 timing_v = FLD_VAL(vsw-1, 7, 0) | FLD_VAL(vfp, 19, 8) |
2124 FLD_VAL(vbp, 31, 20);
2125 }
2126
2127 enable_clocks(1);
2128 dispc_write_reg(DISPC_TIMING_H, timing_h);
2129 dispc_write_reg(DISPC_TIMING_V, timing_v);
2130 enable_clocks(0);
2131}
2132
2133/* change name to mode? */
2134void dispc_set_lcd_timings(struct omap_video_timings *timings)
2135{
2136 unsigned xtot, ytot;
2137 unsigned long ht, vt;
2138
2139 if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp,
2140 timings->hbp, timings->vsw,
2141 timings->vfp, timings->vbp))
2142 BUG();
2143
2144 _dispc_set_lcd_timings(timings->hsw, timings->hfp, timings->hbp,
2145 timings->vsw, timings->vfp, timings->vbp);
2146
2147 dispc_set_lcd_size(timings->x_res, timings->y_res);
2148
2149 xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp;
2150 ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp;
2151
2152 ht = (timings->pixel_clock * 1000) / xtot;
2153 vt = (timings->pixel_clock * 1000) / xtot / ytot;
2154
2155 DSSDBG("xres %u yres %u\n", timings->x_res, timings->y_res);
2156 DSSDBG("pck %u\n", timings->pixel_clock);
2157 DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
2158 timings->hsw, timings->hfp, timings->hbp,
2159 timings->vsw, timings->vfp, timings->vbp);
2160
2161 DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
2162}
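
/* Example of the refresh-rate arithmetic above, with hypothetical
 * panel values: x_res 800, hfp 8, hsw 4, hbp 4 -> xtot 816;
 * y_res 480, vfp 3, vsw 2, vbp 3 -> ytot 488. With pixel_clock
 * 24000 (kHz):
 *
 *	ht = 24000000 / 816       = 29411 Hz
 *	vt = 24000000 / 816 / 488 = 60 Hz
 *
 * i.e. roughly a 60 Hz refresh rate. */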
2163
2164static void dispc_set_lcd_divisor(u16 lck_div, u16 pck_div)
2165{
2166 BUG_ON(lck_div < 1);
2167 BUG_ON(pck_div < 2);
2168
2169 enable_clocks(1);
2170 dispc_write_reg(DISPC_DIVISOR,
2171 FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
2172 enable_clocks(0);
2173}
2174
2175static void dispc_get_lcd_divisor(int *lck_div, int *pck_div)
2176{
2177 u32 l;
2178 l = dispc_read_reg(DISPC_DIVISOR);
2179 *lck_div = FLD_GET(l, 23, 16);
2180 *pck_div = FLD_GET(l, 7, 0);
2181}
2182
2183unsigned long dispc_fclk_rate(void)
2184{
2185 unsigned long r = 0;
2186
2187 if (dss_get_dispc_clk_source() == 0)
2188 r = dss_clk_get_rate(DSS_CLK_FCK1);
2189 else
2190#ifdef CONFIG_OMAP2_DSS_DSI
2191 r = dsi_get_dsi1_pll_rate();
2192#else
2193 BUG();
2194#endif
2195 return r;
2196}
2197
2198unsigned long dispc_lclk_rate(void)
2199{
2200 int lcd;
2201 unsigned long r;
2202 u32 l;
2203
2204 l = dispc_read_reg(DISPC_DIVISOR);
2205
2206 lcd = FLD_GET(l, 23, 16);
2207
2208 r = dispc_fclk_rate();
2209
2210 return r / lcd;
2211}
2212
2213unsigned long dispc_pclk_rate(void)
2214{
2215 int lcd, pcd;
2216 unsigned long r;
2217 u32 l;
2218
2219 l = dispc_read_reg(DISPC_DIVISOR);
2220
2221 lcd = FLD_GET(l, 23, 16);
2222 pcd = FLD_GET(l, 7, 0);
2223
2224 r = dispc_fclk_rate();
2225
2226 return r / lcd / pcd;
2227}
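
/* A minimal numeric sketch of the divisor chain read above, assuming
 * a hypothetical fclk of 96 MHz with LCD divisor 4 and PCD 8:
 *
 *	lclk = 96000000 / 4 = 24000000 Hz
 *	pclk = 24000000 / 8 =  3000000 Hz
 *
 * Both helpers read the live DISPC_DIVISOR register, so the results
 * track whatever dispc_set_lcd_divisor() last programmed. */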
2228
2229void dispc_dump_clocks(struct seq_file *s)
2230{
2231 int lcd, pcd;
2232
2233 enable_clocks(1);
2234
2235 dispc_get_lcd_divisor(&lcd, &pcd);
2236
2237 seq_printf(s, "- DISPC -\n");
2238
2239 seq_printf(s, "dispc fclk source = %s\n",
2240 dss_get_dispc_clk_source() == 0 ?
2241 "dss1_alwon_fclk" : "dsi1_pll_fclk");
2242
2243 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
2244 seq_printf(s, "lck\t\t%-16lulck div\t%u\n", dispc_lclk_rate(), lcd);
2245 seq_printf(s, "pck\t\t%-16lupck div\t%u\n", dispc_pclk_rate(), pcd);
2246
2247 enable_clocks(0);
2248}
2249
2250void dispc_dump_regs(struct seq_file *s)
2251{
2252#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dispc_read_reg(r))
2253
2254 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
2255
2256 DUMPREG(DISPC_REVISION);
2257 DUMPREG(DISPC_SYSCONFIG);
2258 DUMPREG(DISPC_SYSSTATUS);
2259 DUMPREG(DISPC_IRQSTATUS);
2260 DUMPREG(DISPC_IRQENABLE);
2261 DUMPREG(DISPC_CONTROL);
2262 DUMPREG(DISPC_CONFIG);
2263 DUMPREG(DISPC_CAPABLE);
2264 DUMPREG(DISPC_DEFAULT_COLOR0);
2265 DUMPREG(DISPC_DEFAULT_COLOR1);
2266 DUMPREG(DISPC_TRANS_COLOR0);
2267 DUMPREG(DISPC_TRANS_COLOR1);
2268 DUMPREG(DISPC_LINE_STATUS);
2269 DUMPREG(DISPC_LINE_NUMBER);
2270 DUMPREG(DISPC_TIMING_H);
2271 DUMPREG(DISPC_TIMING_V);
2272 DUMPREG(DISPC_POL_FREQ);
2273 DUMPREG(DISPC_DIVISOR);
2274 DUMPREG(DISPC_GLOBAL_ALPHA);
2275 DUMPREG(DISPC_SIZE_DIG);
2276 DUMPREG(DISPC_SIZE_LCD);
2277
2278 DUMPREG(DISPC_GFX_BA0);
2279 DUMPREG(DISPC_GFX_BA1);
2280 DUMPREG(DISPC_GFX_POSITION);
2281 DUMPREG(DISPC_GFX_SIZE);
2282 DUMPREG(DISPC_GFX_ATTRIBUTES);
2283 DUMPREG(DISPC_GFX_FIFO_THRESHOLD);
2284 DUMPREG(DISPC_GFX_FIFO_SIZE_STATUS);
2285 DUMPREG(DISPC_GFX_ROW_INC);
2286 DUMPREG(DISPC_GFX_PIXEL_INC);
2287 DUMPREG(DISPC_GFX_WINDOW_SKIP);
2288 DUMPREG(DISPC_GFX_TABLE_BA);
2289
2290 DUMPREG(DISPC_DATA_CYCLE1);
2291 DUMPREG(DISPC_DATA_CYCLE2);
2292 DUMPREG(DISPC_DATA_CYCLE3);
2293
2294 DUMPREG(DISPC_CPR_COEF_R);
2295 DUMPREG(DISPC_CPR_COEF_G);
2296 DUMPREG(DISPC_CPR_COEF_B);
2297
2298 DUMPREG(DISPC_GFX_PRELOAD);
2299
2300 DUMPREG(DISPC_VID_BA0(0));
2301 DUMPREG(DISPC_VID_BA1(0));
2302 DUMPREG(DISPC_VID_POSITION(0));
2303 DUMPREG(DISPC_VID_SIZE(0));
2304 DUMPREG(DISPC_VID_ATTRIBUTES(0));
2305 DUMPREG(DISPC_VID_FIFO_THRESHOLD(0));
2306 DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(0));
2307 DUMPREG(DISPC_VID_ROW_INC(0));
2308 DUMPREG(DISPC_VID_PIXEL_INC(0));
2309 DUMPREG(DISPC_VID_FIR(0));
2310 DUMPREG(DISPC_VID_PICTURE_SIZE(0));
2311 DUMPREG(DISPC_VID_ACCU0(0));
2312 DUMPREG(DISPC_VID_ACCU1(0));
2313
2314 DUMPREG(DISPC_VID_BA0(1));
2315 DUMPREG(DISPC_VID_BA1(1));
2316 DUMPREG(DISPC_VID_POSITION(1));
2317 DUMPREG(DISPC_VID_SIZE(1));
2318 DUMPREG(DISPC_VID_ATTRIBUTES(1));
2319 DUMPREG(DISPC_VID_FIFO_THRESHOLD(1));
2320 DUMPREG(DISPC_VID_FIFO_SIZE_STATUS(1));
2321 DUMPREG(DISPC_VID_ROW_INC(1));
2322 DUMPREG(DISPC_VID_PIXEL_INC(1));
2323 DUMPREG(DISPC_VID_FIR(1));
2324 DUMPREG(DISPC_VID_PICTURE_SIZE(1));
2325 DUMPREG(DISPC_VID_ACCU0(1));
2326 DUMPREG(DISPC_VID_ACCU1(1));
2327
2328 DUMPREG(DISPC_VID_FIR_COEF_H(0, 0));
2329 DUMPREG(DISPC_VID_FIR_COEF_H(0, 1));
2330 DUMPREG(DISPC_VID_FIR_COEF_H(0, 2));
2331 DUMPREG(DISPC_VID_FIR_COEF_H(0, 3));
2332 DUMPREG(DISPC_VID_FIR_COEF_H(0, 4));
2333 DUMPREG(DISPC_VID_FIR_COEF_H(0, 5));
2334 DUMPREG(DISPC_VID_FIR_COEF_H(0, 6));
2335 DUMPREG(DISPC_VID_FIR_COEF_H(0, 7));
2336 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 0));
2337 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 1));
2338 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 2));
2339 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 3));
2340 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 4));
2341 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 5));
2342 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 6));
2343 DUMPREG(DISPC_VID_FIR_COEF_HV(0, 7));
2344 DUMPREG(DISPC_VID_CONV_COEF(0, 0));
2345 DUMPREG(DISPC_VID_CONV_COEF(0, 1));
2346 DUMPREG(DISPC_VID_CONV_COEF(0, 2));
2347 DUMPREG(DISPC_VID_CONV_COEF(0, 3));
2348 DUMPREG(DISPC_VID_CONV_COEF(0, 4));
2349 DUMPREG(DISPC_VID_FIR_COEF_V(0, 0));
2350 DUMPREG(DISPC_VID_FIR_COEF_V(0, 1));
2351 DUMPREG(DISPC_VID_FIR_COEF_V(0, 2));
2352 DUMPREG(DISPC_VID_FIR_COEF_V(0, 3));
2353 DUMPREG(DISPC_VID_FIR_COEF_V(0, 4));
2354 DUMPREG(DISPC_VID_FIR_COEF_V(0, 5));
2355 DUMPREG(DISPC_VID_FIR_COEF_V(0, 6));
2356 DUMPREG(DISPC_VID_FIR_COEF_V(0, 7));
2357
2358 DUMPREG(DISPC_VID_FIR_COEF_H(1, 0));
2359 DUMPREG(DISPC_VID_FIR_COEF_H(1, 1));
2360 DUMPREG(DISPC_VID_FIR_COEF_H(1, 2));
2361 DUMPREG(DISPC_VID_FIR_COEF_H(1, 3));
2362 DUMPREG(DISPC_VID_FIR_COEF_H(1, 4));
2363 DUMPREG(DISPC_VID_FIR_COEF_H(1, 5));
2364 DUMPREG(DISPC_VID_FIR_COEF_H(1, 6));
2365 DUMPREG(DISPC_VID_FIR_COEF_H(1, 7));
2366 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 0));
2367 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 1));
2368 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 2));
2369 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 3));
2370 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 4));
2371 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 5));
2372 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 6));
2373 DUMPREG(DISPC_VID_FIR_COEF_HV(1, 7));
2374 DUMPREG(DISPC_VID_CONV_COEF(1, 0));
2375 DUMPREG(DISPC_VID_CONV_COEF(1, 1));
2376 DUMPREG(DISPC_VID_CONV_COEF(1, 2));
2377 DUMPREG(DISPC_VID_CONV_COEF(1, 3));
2378 DUMPREG(DISPC_VID_CONV_COEF(1, 4));
2379 DUMPREG(DISPC_VID_FIR_COEF_V(1, 0));
2380 DUMPREG(DISPC_VID_FIR_COEF_V(1, 1));
2381 DUMPREG(DISPC_VID_FIR_COEF_V(1, 2));
2382 DUMPREG(DISPC_VID_FIR_COEF_V(1, 3));
2383 DUMPREG(DISPC_VID_FIR_COEF_V(1, 4));
2384 DUMPREG(DISPC_VID_FIR_COEF_V(1, 5));
2385 DUMPREG(DISPC_VID_FIR_COEF_V(1, 6));
2386 DUMPREG(DISPC_VID_FIR_COEF_V(1, 7));
2387
2388 DUMPREG(DISPC_VID_PRELOAD(0));
2389 DUMPREG(DISPC_VID_PRELOAD(1));
2390
2391 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
2392#undef DUMPREG
2393}
2394
2395static void _dispc_set_pol_freq(bool onoff, bool rf, bool ieo, bool ipc,
2396 bool ihs, bool ivs, u8 acbi, u8 acb)
2397{
2398 u32 l = 0;
2399
2400 DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n",
2401 onoff, rf, ieo, ipc, ihs, ivs, acbi, acb);
2402
2403 l |= FLD_VAL(onoff, 17, 17);
2404 l |= FLD_VAL(rf, 16, 16);
2405 l |= FLD_VAL(ieo, 15, 15);
2406 l |= FLD_VAL(ipc, 14, 14);
2407 l |= FLD_VAL(ihs, 13, 13);
2408 l |= FLD_VAL(ivs, 12, 12);
2409 l |= FLD_VAL(acbi, 11, 8);
2410 l |= FLD_VAL(acb, 7, 0);
2411
2412 enable_clocks(1);
2413 dispc_write_reg(DISPC_POL_FREQ, l);
2414 enable_clocks(0);
2415}
2416
2417void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb)
2418{
2419 _dispc_set_pol_freq((config & OMAP_DSS_LCD_ONOFF) != 0,
2420 (config & OMAP_DSS_LCD_RF) != 0,
2421 (config & OMAP_DSS_LCD_IEO) != 0,
2422 (config & OMAP_DSS_LCD_IPC) != 0,
2423 (config & OMAP_DSS_LCD_IHS) != 0,
2424 (config & OMAP_DSS_LCD_IVS) != 0,
2425 acbi, acb);
2426}
2427
2428/* with fck as input clock rate, find dispc dividers that produce req_pck */
2429void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
2430 struct dispc_clock_info *cinfo)
2431{
2432 u16 pcd_min = is_tft ? 2 : 3;
2433 unsigned long best_pck;
2434 u16 best_ld, cur_ld;
2435 u16 best_pd, cur_pd;
2436
2437 best_pck = 0;
2438 best_ld = 0;
2439 best_pd = 0;
2440
2441 for (cur_ld = 1; cur_ld <= 255; ++cur_ld) {
2442 unsigned long lck = fck / cur_ld;
2443
2444 for (cur_pd = pcd_min; cur_pd <= 255; ++cur_pd) {
2445 unsigned long pck = lck / cur_pd;
2446 long old_delta = abs(best_pck - req_pck);
2447 long new_delta = abs(pck - req_pck);
2448
2449 if (best_pck == 0 || new_delta < old_delta) {
2450 best_pck = pck;
2451 best_ld = cur_ld;
2452 best_pd = cur_pd;
2453
2454 if (pck == req_pck)
2455 goto found;
2456 }
2457
2458 if (pck < req_pck)
2459 break;
2460 }
2461
2462 if (lck / pcd_min < req_pck)
2463 break;
2464 }
2465
2466found:
2467 cinfo->lck_div = best_ld;
2468 cinfo->pck_div = best_pd;
2469 cinfo->lck = fck / cinfo->lck_div;
2470 cinfo->pck = cinfo->lck / cinfo->pck_div;
2471}
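
/* Usage sketch for the divider search above; the rates and the helper
 * name are hypothetical, not part of the driver API. */
static void __maybe_unused dispc_find_clk_divs_example(void)
{
	struct dispc_clock_info cinfo;

	/* request a 25 MHz pixel clock from a 172.8 MHz fck, TFT panel */
	dispc_find_clk_divs(true, 25000000, 172800000, &cinfo);

	/* only integer divisors exist, so cinfo.pck is the closest
	 * achievable rate and may differ from the request */
	DSSDBG("pck %lu (lck_div %u, pck_div %u)\n",
			cinfo.pck, cinfo.lck_div, cinfo.pck_div);
}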
2472
2473/* calculate clock rates using dividers in cinfo */
2474int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
2475 struct dispc_clock_info *cinfo)
2476{
2477 if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
2478 return -EINVAL;
2479 if (cinfo->pck_div < 2 || cinfo->pck_div > 255)
2480 return -EINVAL;
2481
2482 cinfo->lck = dispc_fclk_rate / cinfo->lck_div;
2483 cinfo->pck = cinfo->lck / cinfo->pck_div;
2484
2485 return 0;
2486}
2487
2488int dispc_set_clock_div(struct dispc_clock_info *cinfo)
2489{
2490 DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
2491 DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
2492
2493 dispc_set_lcd_divisor(cinfo->lck_div, cinfo->pck_div);
2494
2495 return 0;
2496}
2497
2498int dispc_get_clock_div(struct dispc_clock_info *cinfo)
2499{
2500 unsigned long fck;
2501
2502 fck = dispc_fclk_rate();
2503
2504 cinfo->lck_div = REG_GET(DISPC_DIVISOR, 23, 16);
2505 cinfo->pck_div = REG_GET(DISPC_DIVISOR, 7, 0);
2506
2507 cinfo->lck = fck / cinfo->lck_div;
2508 cinfo->pck = cinfo->lck / cinfo->pck_div;
2509
2510 return 0;
2511}
2512
2513/* dispc.irq_lock has to be locked by the caller */
2514static void _omap_dispc_set_irqs(void)
2515{
2516 u32 mask;
2517 u32 old_mask;
2518 int i;
2519 struct omap_dispc_isr_data *isr_data;
2520
2521 mask = dispc.irq_error_mask;
2522
2523 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
2524 isr_data = &dispc.registered_isr[i];
2525
2526 if (isr_data->isr == NULL)
2527 continue;
2528
2529 mask |= isr_data->mask;
2530 }
2531
2532 enable_clocks(1);
2533
2534 old_mask = dispc_read_reg(DISPC_IRQENABLE);
2535 /* clear the irqstatus for newly enabled irqs */
2536 dispc_write_reg(DISPC_IRQSTATUS, (mask ^ old_mask) & mask);
2537
2538 dispc_write_reg(DISPC_IRQENABLE, mask);
2539
2540 enable_clocks(0);
2541}
2542
2543int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
2544{
2545 int i;
2546 int ret;
2547 unsigned long flags;
2548 struct omap_dispc_isr_data *isr_data;
2549
2550 if (isr == NULL)
2551 return -EINVAL;
2552
2553 spin_lock_irqsave(&dispc.irq_lock, flags);
2554
2555 /* check for duplicate entry */
2556 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
2557 isr_data = &dispc.registered_isr[i];
2558 if (isr_data->isr == isr && isr_data->arg == arg &&
2559 isr_data->mask == mask) {
2560 ret = -EINVAL;
2561 goto err;
2562 }
2563 }
2564
2565 isr_data = NULL;
2566 ret = -EBUSY;
2567
2568 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
2569 isr_data = &dispc.registered_isr[i];
2570
2571 if (isr_data->isr != NULL)
2572 continue;
2573
2574 isr_data->isr = isr;
2575 isr_data->arg = arg;
2576 isr_data->mask = mask;
2577 ret = 0;
2578
2579 break;
2580 }
2581
2582 _omap_dispc_set_irqs();
2583
2584 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2585
2586 return ret; /* -EBUSY if no free ISR slot was found */
2587err:
2588 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2589
2590 return ret;
2591}
2592EXPORT_SYMBOL(omap_dispc_register_isr);
2593
2594int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask)
2595{
2596 int i;
2597 unsigned long flags;
2598 int ret = -EINVAL;
2599 struct omap_dispc_isr_data *isr_data;
2600
2601 spin_lock_irqsave(&dispc.irq_lock, flags);
2602
2603 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
2604 isr_data = &dispc.registered_isr[i];
2605 if (isr_data->isr != isr || isr_data->arg != arg ||
2606 isr_data->mask != mask)
2607 continue;
2608
2609 /* found the correct isr */
2610
2611 isr_data->isr = NULL;
2612 isr_data->arg = NULL;
2613 isr_data->mask = 0;
2614
2615 ret = 0;
2616 break;
2617 }
2618
2619 if (ret == 0)
2620 _omap_dispc_set_irqs();
2621
2622 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2623
2624 return ret;
2625}
2626EXPORT_SYMBOL(omap_dispc_unregister_isr);
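
/* A minimal sketch of the ISR API above, assuming a hypothetical
 * caller that counts VSYNC interrupts for 100 ms; the names below
 * are illustrative only. */
static atomic_t example_vsync_count = ATOMIC_INIT(0);

static void example_vsync_isr(void *arg, u32 mask)
{
	atomic_inc((atomic_t *)arg);
}

static int __maybe_unused example_count_vsyncs(void)
{
	int r;

	r = omap_dispc_register_isr(example_vsync_isr,
			&example_vsync_count, DISPC_IRQ_VSYNC);
	if (r)
		return r;

	msleep(100);

	omap_dispc_unregister_isr(example_vsync_isr,
			&example_vsync_count, DISPC_IRQ_VSYNC);

	return atomic_read(&example_vsync_count);
}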
2627
2628#ifdef DEBUG
2629static void print_irq_status(u32 status)
2630{
2631 if ((status & dispc.irq_error_mask) == 0)
2632 return;
2633
2634 printk(KERN_DEBUG "DISPC IRQ: 0x%x: ", status);
2635
2636#define PIS(x) \
2637 if (status & DISPC_IRQ_##x) \
2638 printk(#x " ");
2639 PIS(GFX_FIFO_UNDERFLOW);
2640 PIS(OCP_ERR);
2641 PIS(VID1_FIFO_UNDERFLOW);
2642 PIS(VID2_FIFO_UNDERFLOW);
2643 PIS(SYNC_LOST);
2644 PIS(SYNC_LOST_DIGIT);
2645#undef PIS
2646
2647 printk("\n");
2648}
2649#endif
2650
2651/* Called from dss.c. Note that we don't touch clocks here,
2652 * but we presume they are on because we got an IRQ. However,
2653 * an irq handler may turn the clocks off, so we may not have
2654 * the clocks later in the function. */
2655void dispc_irq_handler(void)
2656{
2657 int i;
2658 u32 irqstatus;
2659 u32 handledirqs = 0;
2660 u32 unhandled_errors;
2661 struct omap_dispc_isr_data *isr_data;
2662 struct omap_dispc_isr_data registered_isr[DISPC_MAX_NR_ISRS];
2663
2664 spin_lock(&dispc.irq_lock);
2665
2666 irqstatus = dispc_read_reg(DISPC_IRQSTATUS);
2667
2668#ifdef DEBUG
2669 if (dss_debug)
2670 print_irq_status(irqstatus);
2671#endif
2672 /* Ack the interrupt. Do it here before clocks are possibly turned
2673 * off */
2674 dispc_write_reg(DISPC_IRQSTATUS, irqstatus);
2675 /* flush posted write */
2676 dispc_read_reg(DISPC_IRQSTATUS);
2677
2678 /* make a copy and unlock, so that isrs can unregister
2679 * themselves */
2680 memcpy(registered_isr, dispc.registered_isr,
2681 sizeof(registered_isr));
2682
2683 spin_unlock(&dispc.irq_lock);
2684
2685 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
2686 isr_data = &registered_isr[i];
2687
2688 if (!isr_data->isr)
2689 continue;
2690
2691 if (isr_data->mask & irqstatus) {
2692 isr_data->isr(isr_data->arg, irqstatus);
2693 handledirqs |= isr_data->mask;
2694 }
2695 }
2696
2697 spin_lock(&dispc.irq_lock);
2698
2699 unhandled_errors = irqstatus & ~handledirqs & dispc.irq_error_mask;
2700
2701 if (unhandled_errors) {
2702 dispc.error_irqs |= unhandled_errors;
2703
2704 dispc.irq_error_mask &= ~unhandled_errors;
2705 _omap_dispc_set_irqs();
2706
2707 schedule_work(&dispc.error_work);
2708 }
2709
2710 spin_unlock(&dispc.irq_lock);
2711}
2712
2713static void dispc_error_worker(struct work_struct *work)
2714{
2715 int i;
2716 u32 errors;
2717 unsigned long flags;
2718
2719 spin_lock_irqsave(&dispc.irq_lock, flags);
2720 errors = dispc.error_irqs;
2721 dispc.error_irqs = 0;
2722 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2723
2724 if (errors & DISPC_IRQ_GFX_FIFO_UNDERFLOW) {
2725 DSSERR("GFX_FIFO_UNDERFLOW, disabling GFX\n");
2726 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2727 struct omap_overlay *ovl;
2728 ovl = omap_dss_get_overlay(i);
2729
2730 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
2731 continue;
2732
2733 if (ovl->id == 0) {
2734 dispc_enable_plane(ovl->id, 0);
2735 dispc_go(ovl->manager->id);
2736 mdelay(50);
2737 break;
2738 }
2739 }
2740 }
2741
2742 if (errors & DISPC_IRQ_VID1_FIFO_UNDERFLOW) {
2743 DSSERR("VID1_FIFO_UNDERFLOW, disabling VID1\n");
2744 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2745 struct omap_overlay *ovl;
2746 ovl = omap_dss_get_overlay(i);
2747
2748 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
2749 continue;
2750
2751 if (ovl->id == 1) {
2752 dispc_enable_plane(ovl->id, 0);
2753 dispc_go(ovl->manager->id);
2754 mdelay(50);
2755 break;
2756 }
2757 }
2758 }
2759
2760 if (errors & DISPC_IRQ_VID2_FIFO_UNDERFLOW) {
2761 DSSERR("VID2_FIFO_UNDERFLOW, disabling VID2\n");
2762 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2763 struct omap_overlay *ovl;
2764 ovl = omap_dss_get_overlay(i);
2765
2766 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
2767 continue;
2768
2769 if (ovl->id == 2) {
2770 dispc_enable_plane(ovl->id, 0);
2771 dispc_go(ovl->manager->id);
2772 mdelay(50);
2773 break;
2774 }
2775 }
2776 }
2777
2778 if (errors & DISPC_IRQ_SYNC_LOST) {
2779 struct omap_overlay_manager *manager = NULL;
2780 bool enable = false;
2781
2782 DSSERR("SYNC_LOST, disabling LCD\n");
2783
2784 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
2785 struct omap_overlay_manager *mgr;
2786 mgr = omap_dss_get_overlay_manager(i);
2787
2788 if (mgr->id == OMAP_DSS_CHANNEL_LCD) {
2789 manager = mgr;
2790 enable = mgr->device->state ==
2791 OMAP_DSS_DISPLAY_ACTIVE;
2792 mgr->device->disable(mgr->device);
2793 break;
2794 }
2795 }
2796
2797 if (manager) {
2798 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2799 struct omap_overlay *ovl;
2800 ovl = omap_dss_get_overlay(i);
2801
2802 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
2803 continue;
2804
2805 if (ovl->id != 0 && ovl->manager == manager)
2806 dispc_enable_plane(ovl->id, 0);
2807 }
2808
2809 dispc_go(manager->id);
2810 mdelay(50);
2811 if (enable)
2812 manager->device->enable(manager->device);
2813 }
2814 }
2815
2816 if (errors & DISPC_IRQ_SYNC_LOST_DIGIT) {
2817 struct omap_overlay_manager *manager = NULL;
2818 bool enable = false;
2819
2820 DSSERR("SYNC_LOST_DIGIT, disabling TV\n");
2821
2822 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
2823 struct omap_overlay_manager *mgr;
2824 mgr = omap_dss_get_overlay_manager(i);
2825
2826 if (mgr->id == OMAP_DSS_CHANNEL_DIGIT) {
2827 manager = mgr;
2828 enable = mgr->device->state ==
2829 OMAP_DSS_DISPLAY_ACTIVE;
2830 mgr->device->disable(mgr->device);
2831 break;
2832 }
2833 }
2834
2835 if (manager) {
2836 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2837 struct omap_overlay *ovl;
2838 ovl = omap_dss_get_overlay(i);
2839
2840 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
2841 continue;
2842
2843 if (ovl->id != 0 && ovl->manager == manager)
2844 dispc_enable_plane(ovl->id, 0);
2845 }
2846
2847 dispc_go(manager->id);
2848 mdelay(50);
2849 if (enable)
2850 manager->device->enable(manager->device);
2851 }
2852 }
2853
2854 if (errors & DISPC_IRQ_OCP_ERR) {
2855 DSSERR("OCP_ERR\n");
2856 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
2857 struct omap_overlay_manager *mgr;
2858 mgr = omap_dss_get_overlay_manager(i);
2859
2860 if (mgr->caps & OMAP_DSS_OVL_CAP_DISPC)
2861 mgr->device->disable(mgr->device);
2862 }
2863 }
2864
2865 spin_lock_irqsave(&dispc.irq_lock, flags);
2866 dispc.irq_error_mask |= errors;
2867 _omap_dispc_set_irqs();
2868 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2869}
2870
2871int omap_dispc_wait_for_irq_timeout(u32 irqmask, unsigned long timeout)
2872{
2873 void dispc_irq_wait_handler(void *data, u32 mask)
2874 {
2875 complete((struct completion *)data);
2876 }
2877
2878 int r;
2879 DECLARE_COMPLETION_ONSTACK(completion);
2880
2881 r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
2882 irqmask);
2883
2884 if (r)
2885 return r;
2886
2887 timeout = wait_for_completion_timeout(&completion, timeout);
2888
2889 omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
2890
2891 if (timeout == 0)
2892 return -ETIMEDOUT;
2893
2894 if (timeout == -ERESTARTSYS)
2895 return -ERESTARTSYS;
2896
2897 return 0;
2898}
2899
2900int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
2901 unsigned long timeout)
2902{
2903 void dispc_irq_wait_handler(void *data, u32 mask)
2904 {
2905 complete((struct completion *)data);
2906 }
2907
2908 int r;
2909 DECLARE_COMPLETION_ONSTACK(completion);
2910
2911 r = omap_dispc_register_isr(dispc_irq_wait_handler, &completion,
2912 irqmask);
2913
2914 if (r)
2915 return r;
2916
2917 timeout = wait_for_completion_interruptible_timeout(&completion,
2918 timeout);
2919
2920 omap_dispc_unregister_isr(dispc_irq_wait_handler, &completion, irqmask);
2921
2922 if (timeout == 0)
2923 return -ETIMEDOUT;
2924
2925 if (timeout == -ERESTARTSYS)
2926 return -ERESTARTSYS;
2927
2928 return 0;
2929}
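
/* Usage sketch for the wait helpers above: block for at most 100 ms
 * waiting for the next VSYNC (hypothetical caller). */
static int __maybe_unused example_wait_one_vsync(void)
{
	int r;

	r = omap_dispc_wait_for_irq_interruptible_timeout(DISPC_IRQ_VSYNC,
			msecs_to_jiffies(100));
	if (r == -ETIMEDOUT)
		DSSERR("timeout waiting for VSYNC\n");

	return r;
}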
2930
2931#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
2932void dispc_fake_vsync_irq(void)
2933{
2934 u32 irqstatus = DISPC_IRQ_VSYNC;
2935 int i;
2936
2937 local_irq_disable();
2938
2939 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
2940 struct omap_dispc_isr_data *isr_data;
2941 isr_data = &dispc.registered_isr[i];
2942
2943 if (!isr_data->isr)
2944 continue;
2945
2946 if (isr_data->mask & irqstatus)
2947 isr_data->isr(isr_data->arg, irqstatus);
2948 }
2949
2950 local_irq_enable();
2951}
2952#endif
2953
2954static void _omap_dispc_initialize_irq(void)
2955{
2956 unsigned long flags;
2957
2958 spin_lock_irqsave(&dispc.irq_lock, flags);
2959
2960 memset(dispc.registered_isr, 0, sizeof(dispc.registered_isr));
2961
2962 dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR;
2963
2964 /* there's SYNC_LOST_DIGIT waiting after enabling the DSS,
2965 * so clear it */
2966 dispc_write_reg(DISPC_IRQSTATUS, dispc_read_reg(DISPC_IRQSTATUS));
2967
2968 _omap_dispc_set_irqs();
2969
2970 spin_unlock_irqrestore(&dispc.irq_lock, flags);
2971}
2972
2973void dispc_enable_sidle(void)
2974{
2975 REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3); /* SIDLEMODE: smart idle */
2976}
2977
2978void dispc_disable_sidle(void)
2979{
2980 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
2981}
2982
2983static void _omap_dispc_initial_config(void)
2984{
2985 u32 l;
2986
2987 l = dispc_read_reg(DISPC_SYSCONFIG);
2988 l = FLD_MOD(l, 2, 13, 12); /* MIDLEMODE: smart standby */
2989 l = FLD_MOD(l, 2, 4, 3); /* SIDLEMODE: smart idle */
2990 l = FLD_MOD(l, 1, 2, 2); /* ENWAKEUP */
2991 l = FLD_MOD(l, 1, 0, 0); /* AUTOIDLE */
2992 dispc_write_reg(DISPC_SYSCONFIG, l);
2993
2994 /* FUNCGATED */
2995 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
2996
2997 /* L3 firewall setting: enable access to OCM RAM */
2998 /* XXX this should be somewhere in plat-omap */
2999 if (cpu_is_omap24xx())
3000 __raw_writel(0x402000b0, OMAP2_L3_IO_ADDRESS(0x680050a0));
3001
3002 _dispc_setup_color_conv_coef();
3003
3004 dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
3005
3006 dispc_read_plane_fifo_sizes();
3007}
3008
3009int dispc_init(void)
3010{
3011 u32 rev;
3012
3013 spin_lock_init(&dispc.irq_lock);
3014
3015 INIT_WORK(&dispc.error_work, dispc_error_worker);
3016
3017 dispc.base = ioremap(DISPC_BASE, DISPC_SZ_REGS);
3018 if (!dispc.base) {
3019 DSSERR("can't ioremap DISPC\n");
3020 return -ENOMEM;
3021 }
3022
3023 enable_clocks(1);
3024
3025 _omap_dispc_initial_config();
3026
3027 _omap_dispc_initialize_irq();
3028
3029 dispc_save_context();
3030
3031 rev = dispc_read_reg(DISPC_REVISION);
3032 printk(KERN_INFO "OMAP DISPC rev %d.%d\n",
3033 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3034
3035 enable_clocks(0);
3036
3037 return 0;
3038}
3039
3040void dispc_exit(void)
3041{
3042 iounmap(dispc.base);
3043}
3044
3045int dispc_enable_plane(enum omap_plane plane, bool enable)
3046{
3047 DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
3048
3049 enable_clocks(1);
3050 _dispc_enable_plane(plane, enable);
3051 enable_clocks(0);
3052
3053 return 0;
3054}
3055
3056int dispc_setup_plane(enum omap_plane plane,
3057 u32 paddr, u16 screen_width,
3058 u16 pos_x, u16 pos_y,
3059 u16 width, u16 height,
3060 u16 out_width, u16 out_height,
3061 enum omap_color_mode color_mode,
3062 bool ilace,
3063 enum omap_dss_rotation_type rotation_type,
3064 u8 rotation, bool mirror, u8 global_alpha)
3065{
3066 int r = 0;
3067
3068 DSSDBG("dispc_setup_plane %d, pa %x, sw %d, %d,%d, %dx%d -> "
3069 "%dx%d, ilace %d, cmode %x, rot %d, mir %d\n",
3070 plane, paddr, screen_width, pos_x, pos_y,
3071 width, height,
3072 out_width, out_height,
3073 ilace, color_mode,
3074 rotation, mirror);
3075
3076 enable_clocks(1);
3077
3078 r = _dispc_setup_plane(plane,
3079 paddr, screen_width,
3080 pos_x, pos_y,
3081 width, height,
3082 out_width, out_height,
3083 color_mode, ilace,
3084 rotation_type,
3085 rotation, mirror,
3086 global_alpha);
3087
3088 enable_clocks(0);
3089
3090 return r;
3091}
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
new file mode 100644
index 000000000000..3b92b84b9560
--- /dev/null
+++ b/drivers/video/omap2/dss/display.c
@@ -0,0 +1,671 @@
1/*
2 * linux/drivers/video/omap2/dss/display.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "DISPLAY"
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/jiffies.h>
28#include <linux/list.h>
29#include <linux/platform_device.h>
30
31#include <plat/display.h>
32#include "dss.h"
33
34static LIST_HEAD(display_list);
35
36static ssize_t display_enabled_show(struct device *dev,
37 struct device_attribute *attr, char *buf)
38{
39 struct omap_dss_device *dssdev = to_dss_device(dev);
40 bool enabled = dssdev->state != OMAP_DSS_DISPLAY_DISABLED;
41
42 return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
43}
44
45static ssize_t display_enabled_store(struct device *dev,
46 struct device_attribute *attr,
47 const char *buf, size_t size)
48{
49 struct omap_dss_device *dssdev = to_dss_device(dev);
50 bool enabled; int r;
51
52 enabled = simple_strtoul(buf, NULL, 10);
53
54 if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
55 if (enabled) {
56 r = dssdev->enable(dssdev);
57 if (r)
58 return r;
59 } else {
60 dssdev->disable(dssdev);
61 }
62 }
63
64 return size;
65}
66
67static ssize_t display_upd_mode_show(struct device *dev,
68 struct device_attribute *attr, char *buf)
69{
70 struct omap_dss_device *dssdev = to_dss_device(dev);
71 enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
72 if (dssdev->get_update_mode)
73 mode = dssdev->get_update_mode(dssdev);
74 return snprintf(buf, PAGE_SIZE, "%d\n", mode);
75}
76
77static ssize_t display_upd_mode_store(struct device *dev,
78 struct device_attribute *attr,
79 const char *buf, size_t size)
80{
81 struct omap_dss_device *dssdev = to_dss_device(dev);
82 int val, r;
83 enum omap_dss_update_mode mode;
84
85 val = simple_strtoul(buf, NULL, 10);
86
87 switch (val) {
88 case OMAP_DSS_UPDATE_DISABLED:
89 case OMAP_DSS_UPDATE_AUTO:
90 case OMAP_DSS_UPDATE_MANUAL:
91 mode = (enum omap_dss_update_mode)val;
92 break;
93 default:
94 return -EINVAL;
95 }
96
	if (!dssdev->set_update_mode)
		return -ENOENT;

97 r = dssdev->set_update_mode(dssdev, mode);
98 if (r)
99 return r;
100
101 return size;
102}
103
104static ssize_t display_tear_show(struct device *dev,
105 struct device_attribute *attr, char *buf)
106{
107 struct omap_dss_device *dssdev = to_dss_device(dev);
108 return snprintf(buf, PAGE_SIZE, "%d\n",
109 dssdev->get_te ? dssdev->get_te(dssdev) : 0);
110}
111
112static ssize_t display_tear_store(struct device *dev,
113 struct device_attribute *attr, const char *buf, size_t size)
114{
115 struct omap_dss_device *dssdev = to_dss_device(dev);
116 unsigned long te;
117 int r;
118
119 if (!dssdev->enable_te || !dssdev->get_te)
120 return -ENOENT;
121
122 te = simple_strtoul(buf, NULL, 0);
123
124 r = dssdev->enable_te(dssdev, te);
125 if (r)
126 return r;
127
128 return size;
129}
130
131static ssize_t display_timings_show(struct device *dev,
132 struct device_attribute *attr, char *buf)
133{
134 struct omap_dss_device *dssdev = to_dss_device(dev);
135 struct omap_video_timings t;
136
137 if (!dssdev->get_timings)
138 return -ENOENT;
139
140 dssdev->get_timings(dssdev, &t);
141
142 return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
143 t.pixel_clock,
144 t.x_res, t.hfp, t.hbp, t.hsw,
145 t.y_res, t.vfp, t.vbp, t.vsw);
146}
147
148static ssize_t display_timings_store(struct device *dev,
149 struct device_attribute *attr, const char *buf, size_t size)
150{
151 struct omap_dss_device *dssdev = to_dss_device(dev);
152 struct omap_video_timings t;
153 int r, found;
154
155 if (!dssdev->set_timings || !dssdev->check_timings)
156 return -ENOENT;
157
158 found = 0;
159#ifdef CONFIG_OMAP2_DSS_VENC
160 if (strncmp("pal", buf, 3) == 0) {
161 t = omap_dss_pal_timings;
162 found = 1;
163 } else if (strncmp("ntsc", buf, 4) == 0) {
164 t = omap_dss_ntsc_timings;
165 found = 1;
166 }
167#endif
168 if (!found && sscanf(buf, "%u,%hu/%hu/%hu/%hu,%hu/%hu/%hu/%hu",
169 &t.pixel_clock,
170 &t.x_res, &t.hfp, &t.hbp, &t.hsw,
171 &t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
172 return -EINVAL;
173
174 r = dssdev->check_timings(dssdev, &t);
175 if (r)
176 return r;
177
178 dssdev->set_timings(dssdev, &t);
179
180 return size;
181}
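
/* Example inputs accepted by the store hook above (the sysfs path
 * and the numbers are hypothetical):
 *
 *	# echo pal > .../display0/timings
 *	# echo 24000,800/8/4/4,480/3/2/3 > .../display0/timings
 *
 * i.e. pck,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw, matching the sscanf()
 * format above; "pal" and "ntsc" are only recognized when
 * CONFIG_OMAP2_DSS_VENC is enabled. */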
182
183static ssize_t display_rotate_show(struct device *dev,
184 struct device_attribute *attr, char *buf)
185{
186 struct omap_dss_device *dssdev = to_dss_device(dev);
187 int rotate;
188 if (!dssdev->get_rotate)
189 return -ENOENT;
190 rotate = dssdev->get_rotate(dssdev);
191 return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
192}
193
194static ssize_t display_rotate_store(struct device *dev,
195 struct device_attribute *attr, const char *buf, size_t size)
196{
197 struct omap_dss_device *dssdev = to_dss_device(dev);
198 unsigned long rot;
199 int r;
200
201 if (!dssdev->set_rotate || !dssdev->get_rotate)
202 return -ENOENT;
203
204 rot = simple_strtoul(buf, NULL, 0);
205
206 r = dssdev->set_rotate(dssdev, rot);
207 if (r)
208 return r;
209
210 return size;
211}
212
213static ssize_t display_mirror_show(struct device *dev,
214 struct device_attribute *attr, char *buf)
215{
216 struct omap_dss_device *dssdev = to_dss_device(dev);
217 int mirror;
218 if (!dssdev->get_mirror)
219 return -ENOENT;
220 mirror = dssdev->get_mirror(dssdev);
221 return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
222}
223
224static ssize_t display_mirror_store(struct device *dev,
225 struct device_attribute *attr, const char *buf, size_t size)
226{
227 struct omap_dss_device *dssdev = to_dss_device(dev);
228 unsigned long mirror;
229 int r;
230
231 if (!dssdev->set_mirror || !dssdev->get_mirror)
232 return -ENOENT;
233
234 mirror = simple_strtoul(buf, NULL, 0);
235
236 r = dssdev->set_mirror(dssdev, mirror);
237 if (r)
238 return r;
239
240 return size;
241}
242
243static ssize_t display_wss_show(struct device *dev,
244 struct device_attribute *attr, char *buf)
245{
246 struct omap_dss_device *dssdev = to_dss_device(dev);
247 unsigned int wss;
248
249 if (!dssdev->get_wss)
250 return -ENOENT;
251
252 wss = dssdev->get_wss(dssdev);
253
254 return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
255}
256
257static ssize_t display_wss_store(struct device *dev,
258 struct device_attribute *attr, const char *buf, size_t size)
259{
260 struct omap_dss_device *dssdev = to_dss_device(dev);
261 unsigned long wss;
262 int r;
263
264 if (!dssdev->get_wss || !dssdev->set_wss)
265 return -ENOENT;
266
267 if (strict_strtoul(buf, 0, &wss))
268 return -EINVAL;
269
270 if (wss > 0xfffff)
271 return -EINVAL;
272
273 r = dssdev->set_wss(dssdev, wss);
274 if (r)
275 return r;
276
277 return size;
278}
279
280static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR,
281 display_enabled_show, display_enabled_store);
282static DEVICE_ATTR(update_mode, S_IRUGO|S_IWUSR,
283 display_upd_mode_show, display_upd_mode_store);
284static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR,
285 display_tear_show, display_tear_store);
286static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR,
287 display_timings_show, display_timings_store);
288static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR,
289 display_rotate_show, display_rotate_store);
290static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR,
291 display_mirror_show, display_mirror_store);
292static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR,
293 display_wss_show, display_wss_store);
294
295static struct device_attribute *display_sysfs_attrs[] = {
296 &dev_attr_enabled,
297 &dev_attr_update_mode,
298 &dev_attr_tear_elim,
299 &dev_attr_timings,
300 &dev_attr_rotate,
301 &dev_attr_mirror,
302 &dev_attr_wss,
303 NULL
304};
305
306static void default_get_resolution(struct omap_dss_device *dssdev,
307 u16 *xres, u16 *yres)
308{
309 *xres = dssdev->panel.timings.x_res;
310 *yres = dssdev->panel.timings.y_res;
311}
312
313void default_get_overlay_fifo_thresholds(enum omap_plane plane,
314 u32 fifo_size, enum omap_burst_size *burst_size,
315 u32 *fifo_low, u32 *fifo_high)
316{
317 unsigned burst_size_bytes;
318
319 *burst_size = OMAP_DSS_BURST_16x32;
320 burst_size_bytes = 16 * 32 / 8;
321
322 *fifo_high = fifo_size - 1;
323 *fifo_low = fifo_size - burst_size_bytes;
324}
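
/* Worked numbers for the defaults above: a 16x32 burst is
 * 16 * 32 / 8 = 64 bytes, so for a hypothetical 1024-byte FIFO this
 * yields fifo_high = 1023 and fifo_low = 960, i.e. a refill is
 * requested once the fill level falls to one burst below full. */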
325
326static int default_wait_vsync(struct omap_dss_device *dssdev)
327{
328 unsigned long timeout = msecs_to_jiffies(500);
329 u32 irq;
330
331 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
332 irq = DISPC_IRQ_EVSYNC_ODD;
333 else
334 irq = DISPC_IRQ_VSYNC;
335
336 return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
337}
338
339static int default_get_recommended_bpp(struct omap_dss_device *dssdev)
340{
341 if (dssdev->panel.recommended_bpp)
342 return dssdev->panel.recommended_bpp;
343
344 switch (dssdev->type) {
345 case OMAP_DISPLAY_TYPE_DPI:
346 if (dssdev->phy.dpi.data_lines == 24)
347 return 24;
348 else
349 return 16;
350
351 case OMAP_DISPLAY_TYPE_DBI:
352 case OMAP_DISPLAY_TYPE_DSI:
353 if (dssdev->ctrl.pixel_size == 24)
354 return 24;
355 else
356 return 16;
357 case OMAP_DISPLAY_TYPE_VENC:
358 case OMAP_DISPLAY_TYPE_SDI:
359 return 24;
361 default:
362 BUG();
363 }
364}
365
366/* Checks if replication logic should be used. Only use for active matrix,
367 * when overlay is in RGB12U or RGB16 mode, and LCD interface is
368 * 18bpp or 24bpp */
369bool dss_use_replication(struct omap_dss_device *dssdev,
370 enum omap_color_mode mode)
371{
372 int bpp;
373
374 if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16)
375 return false;
376
377 if (dssdev->type == OMAP_DISPLAY_TYPE_DPI &&
378 (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0)
379 return false;
380
381 switch (dssdev->type) {
382 case OMAP_DISPLAY_TYPE_DPI:
383 bpp = dssdev->phy.dpi.data_lines;
384 break;
385 case OMAP_DISPLAY_TYPE_VENC:
386 case OMAP_DISPLAY_TYPE_SDI:
387 bpp = 24;
388 break;
389 case OMAP_DISPLAY_TYPE_DBI:
390 case OMAP_DISPLAY_TYPE_DSI:
391 bpp = dssdev->ctrl.pixel_size;
392 break;
393 default:
394 BUG();
395 }
396
397 return bpp > 16;
398}
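
/* Concrete cases for the check above: an RGB16 overlay feeding a
 * 24-line DPI TFT panel gives bpp 24 > 16, so replication is used;
 * the same overlay on a 16-line panel is not replicated, and
 * passive-matrix (non-TFT) DPI panels never are. */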
399
400void dss_init_device(struct platform_device *pdev,
401 struct omap_dss_device *dssdev)
402{
403 struct device_attribute *attr;
404 int i;
405 int r;
406
407 switch (dssdev->type) {
408 case OMAP_DISPLAY_TYPE_DPI:
409#ifdef CONFIG_OMAP2_DSS_RFBI
410 case OMAP_DISPLAY_TYPE_DBI:
411#endif
412#ifdef CONFIG_OMAP2_DSS_SDI
413 case OMAP_DISPLAY_TYPE_SDI:
414#endif
415#ifdef CONFIG_OMAP2_DSS_DSI
416 case OMAP_DISPLAY_TYPE_DSI:
417#endif
418#ifdef CONFIG_OMAP2_DSS_VENC
419 case OMAP_DISPLAY_TYPE_VENC:
420#endif
421 break;
422 default:
423 DSSERR("Support for display '%s' not compiled in.\n",
424 dssdev->name);
425 return;
426 }
427
428 dssdev->get_resolution = default_get_resolution;
429 dssdev->get_recommended_bpp = default_get_recommended_bpp;
430 dssdev->wait_vsync = default_wait_vsync;
431
432 switch (dssdev->type) {
433 case OMAP_DISPLAY_TYPE_DPI:
434 r = dpi_init_display(dssdev);
435 break;
436#ifdef CONFIG_OMAP2_DSS_RFBI
437 case OMAP_DISPLAY_TYPE_DBI:
438 r = rfbi_init_display(dssdev);
439 break;
440#endif
441#ifdef CONFIG_OMAP2_DSS_VENC
442 case OMAP_DISPLAY_TYPE_VENC:
443 r = venc_init_display(dssdev);
444 break;
445#endif
446#ifdef CONFIG_OMAP2_DSS_SDI
447 case OMAP_DISPLAY_TYPE_SDI:
448 r = sdi_init_display(dssdev);
449 break;
450#endif
451#ifdef CONFIG_OMAP2_DSS_DSI
452 case OMAP_DISPLAY_TYPE_DSI:
453 r = dsi_init_display(dssdev);
454 break;
455#endif
456 default:
457 BUG();
458 }
459
460 if (r) {
461 DSSERR("failed to init display %s\n", dssdev->name);
462 return;
463 }
464
465 /* create device sysfs files */
466 i = 0;
467 while ((attr = display_sysfs_attrs[i++]) != NULL) {
468 r = device_create_file(&dssdev->dev, attr);
469 if (r)
470 DSSERR("failed to create sysfs file\n");
471 }
472
473 /* create display sysfs links */
474 r = sysfs_create_link(&pdev->dev.kobj, &dssdev->dev.kobj,
475 dev_name(&dssdev->dev));
476 if (r)
477 DSSERR("failed to create sysfs display link\n");
478}
479
480void dss_uninit_device(struct platform_device *pdev,
481 struct omap_dss_device *dssdev)
482{
483 struct device_attribute *attr;
484 int i = 0;
485
486 sysfs_remove_link(&pdev->dev.kobj, dev_name(&dssdev->dev));
487
488 while ((attr = display_sysfs_attrs[i++]) != NULL)
489 device_remove_file(&dssdev->dev, attr);
490
491 if (dssdev->manager)
492 dssdev->manager->unset_device(dssdev->manager);
493}
494
495static int dss_suspend_device(struct device *dev, void *data)
496{
497 int r;
498 struct omap_dss_device *dssdev = to_dss_device(dev);
499
500 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
501 dssdev->activate_after_resume = false;
502 return 0;
503 }
504
505 if (!dssdev->suspend) {
506 DSSERR("display '%s' doesn't implement suspend\n",
507 dssdev->name);
508 return -ENOSYS;
509 }
510
511 r = dssdev->suspend(dssdev);
512 if (r)
513 return r;
514
515 dssdev->activate_after_resume = true;
516
517 return 0;
518}
519
520int dss_suspend_all_devices(void)
521{
522 int r;
523 struct bus_type *bus = dss_get_bus();
524
525 r = bus_for_each_dev(bus, NULL, NULL, dss_suspend_device);
526 if (r) {
527 /* resume all displays that were suspended */
528 dss_resume_all_devices();
529 return r;
530 }
531
532 return 0;
533}
534
535static int dss_resume_device(struct device *dev, void *data)
536{
537 int r;
538 struct omap_dss_device *dssdev = to_dss_device(dev);
539
540 if (dssdev->activate_after_resume && dssdev->resume) {
541 r = dssdev->resume(dssdev);
542 if (r)
543 return r;
544 }
545
546 dssdev->activate_after_resume = false;
547
548 return 0;
549}
550
551int dss_resume_all_devices(void)
552{
553 struct bus_type *bus = dss_get_bus();
554
555 return bus_for_each_dev(bus, NULL, NULL, dss_resume_device);
556}
557
558static int dss_disable_device(struct device *dev, void *data)
559{
560 struct omap_dss_device *dssdev = to_dss_device(dev);
561 dssdev->disable(dssdev);
562 return 0;
563}
564
565void dss_disable_all_devices(void)
566{
567 struct bus_type *bus = dss_get_bus();
568 bus_for_each_dev(bus, NULL, NULL, dss_disable_device);
569}
570
572void omap_dss_get_device(struct omap_dss_device *dssdev)
573{
574 get_device(&dssdev->dev);
575}
576EXPORT_SYMBOL(omap_dss_get_device);
577
578void omap_dss_put_device(struct omap_dss_device *dssdev)
579{
580 put_device(&dssdev->dev);
581}
582EXPORT_SYMBOL(omap_dss_put_device);
583
584/* ref count of the found device is incremented. ref count
585 * of from-device is decremented. */
586struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
587{
588 struct device *dev;
589 struct device *dev_start = NULL;
590 struct omap_dss_device *dssdev = NULL;
591
592 int match(struct device *dev, void *data)
593 {
594 /* skip panels connected to controllers */
595 if (to_dss_device(dev)->panel.ctrl)
596 return 0;
597
598 return 1;
599 }
600
601 if (from)
602 dev_start = &from->dev;
603 dev = bus_find_device(dss_get_bus(), dev_start, NULL, match);
604 if (dev)
605 dssdev = to_dss_device(dev);
606 if (from)
607 put_device(&from->dev);
608
609 return dssdev;
610}
611EXPORT_SYMBOL(omap_dss_get_next_device);
612
613struct omap_dss_device *omap_dss_find_device(void *data,
614 int (*match)(struct omap_dss_device *dssdev, void *data))
615{
616 struct omap_dss_device *dssdev = NULL;
617
618 while ((dssdev = omap_dss_get_next_device(dssdev)) != NULL) {
619 if (match(dssdev, data))
620 return dssdev;
621 }
622
623 return NULL;
624}
625EXPORT_SYMBOL(omap_dss_find_device);
626
627int omap_dss_start_device(struct omap_dss_device *dssdev)
628{
629 int r;
630
631 if (!dssdev->driver) {
632 DSSDBG("no driver\n");
633 r = -ENODEV;
634 goto err0;
635 }
636
637 if (dssdev->ctrl.panel && !dssdev->ctrl.panel->driver) {
638 DSSDBG("no panel driver\n");
639 r = -ENODEV;
640 goto err0;
641 }
642
643 if (!try_module_get(dssdev->dev.driver->owner)) {
644 r = -ENODEV;
645 goto err0;
646 }
647
648 if (dssdev->ctrl.panel) {
649 if (!try_module_get(dssdev->ctrl.panel->dev.driver->owner)) {
650 r = -ENODEV;
651 goto err1;
652 }
653 }
654
655 return 0;
656err1:
657 module_put(dssdev->dev.driver->owner);
658err0:
659 return r;
660}
661EXPORT_SYMBOL(omap_dss_start_device);
662
663void omap_dss_stop_device(struct omap_dss_device *dssdev)
664{
665 if (dssdev->ctrl.panel)
666 module_put(dssdev->ctrl.panel->dev.driver->owner);
667
668 module_put(dssdev->dev.driver->owner);
669}
670EXPORT_SYMBOL(omap_dss_stop_device);
671
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
new file mode 100644
index 000000000000..2d71031baa25
--- /dev/null
+++ b/drivers/video/omap2/dss/dpi.c
@@ -0,0 +1,399 @@
1/*
2 * linux/drivers/video/omap2/dss/dpi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "DPI"
24
25#include <linux/kernel.h>
26#include <linux/clk.h>
27#include <linux/delay.h>
28#include <linux/errno.h>
29
30#include <plat/display.h>
31#include <plat/cpu.h>
32
33#include "dss.h"
34
35static struct {
36 int update_enabled;
37} dpi;
38
39#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
40static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
41 unsigned long *fck, int *lck_div, int *pck_div)
42{
43 struct dsi_clock_info dsi_cinfo;
44 struct dispc_clock_info dispc_cinfo;
45 int r;
46
47 r = dsi_pll_calc_clock_div_pck(is_tft, pck_req, &dsi_cinfo,
48 &dispc_cinfo);
49 if (r)
50 return r;
51
52 r = dsi_pll_set_clock_div(&dsi_cinfo);
53 if (r)
54 return r;
55
56 dss_select_clk_source(0, 1);
57
58 r = dispc_set_clock_div(&dispc_cinfo);
59 if (r)
60 return r;
61
62 *fck = dsi_cinfo.dsi1_pll_fclk;
63 *lck_div = dispc_cinfo.lck_div;
64 *pck_div = dispc_cinfo.pck_div;
65
66 return 0;
67}
68#else
69static int dpi_set_dispc_clk(bool is_tft, unsigned long pck_req,
70 unsigned long *fck, int *lck_div, int *pck_div)
71{
72 struct dss_clock_info dss_cinfo;
73 struct dispc_clock_info dispc_cinfo;
74 int r;
75
76 r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo);
77 if (r)
78 return r;
79
80 r = dss_set_clock_div(&dss_cinfo);
81 if (r)
82 return r;
83
84 r = dispc_set_clock_div(&dispc_cinfo);
85 if (r)
86 return r;
87
88 *fck = dss_cinfo.fck;
89 *lck_div = dispc_cinfo.lck_div;
90 *pck_div = dispc_cinfo.pck_div;
91
92 return 0;
93}
94#endif
95
96static int dpi_set_mode(struct omap_dss_device *dssdev)
97{
98 struct omap_video_timings *t = &dssdev->panel.timings;
99 int lck_div, pck_div;
100 unsigned long fck;
101 unsigned long pck;
102 bool is_tft;
103 int r = 0;
104
105 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
106
107 dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
108 dssdev->panel.acb);
109
110 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
111
112#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
113 r = dpi_set_dsi_clk(is_tft, t->pixel_clock * 1000,
114 &fck, &lck_div, &pck_div);
115#else
116 r = dpi_set_dispc_clk(is_tft, t->pixel_clock * 1000,
117 &fck, &lck_div, &pck_div);
118#endif
119 if (r)
120 goto err0;
121
122 pck = fck / lck_div / pck_div / 1000;
123
124 if (pck != t->pixel_clock) {
125 DSSWARN("Could not find exact pixel clock. "
126 "Requested %d kHz, got %lu kHz\n",
127 t->pixel_clock, pck);
128
129 t->pixel_clock = pck;
130 }
131
132 dispc_set_lcd_timings(t);
133
134err0:
135 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
136 return r;
137}
138
139static int dpi_basic_init(struct omap_dss_device *dssdev)
140{
141 bool is_tft;
142
143 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
144
145 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
146 dispc_set_lcd_display_type(is_tft ? OMAP_DSS_LCD_DISPLAY_TFT :
147 OMAP_DSS_LCD_DISPLAY_STN);
148 dispc_set_tft_data_lines(dssdev->phy.dpi.data_lines);
149
150 return 0;
151}
152
153static int dpi_display_enable(struct omap_dss_device *dssdev)
154{
155 int r;
156
157 r = omap_dss_start_device(dssdev);
158 if (r) {
159 DSSERR("failed to start device\n");
160 goto err0;
161 }
162
163 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
164 DSSERR("display already enabled\n");
165 r = -EINVAL;
166 goto err1;
167 }
168
169 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
170
171 r = dpi_basic_init(dssdev);
172 if (r)
173 goto err2;
174
175#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
176 dss_clk_enable(DSS_CLK_FCK2);
177 r = dsi_pll_init(dssdev, 0, 1);
178 if (r)
179 goto err3;
180#endif
181 r = dpi_set_mode(dssdev);
182 if (r)
183 goto err4;
184
185 mdelay(2);
186
187 dispc_enable_lcd_out(1);
188
189 r = dssdev->driver->enable(dssdev);
190 if (r)
191 goto err5;
192
193 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
194
195 return 0;
196
197err5:
198 dispc_enable_lcd_out(0);
199err4:
200#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
201 dsi_pll_uninit();
202err3:
203 dss_clk_disable(DSS_CLK_FCK2);
204#endif
205err2:
206 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
207err1:
208 omap_dss_stop_device(dssdev);
209err0:
210 return r;
211}
212
213static int dpi_display_resume(struct omap_dss_device *dssdev);
214
215static void dpi_display_disable(struct omap_dss_device *dssdev)
216{
217 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
218 return;
219
220 if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
221 dpi_display_resume(dssdev);
222
223 dssdev->driver->disable(dssdev);
224
225 dispc_enable_lcd_out(0);
226
227#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
228 dss_select_clk_source(0, 0);
229 dsi_pll_uninit();
230 dss_clk_disable(DSS_CLK_FCK2);
231#endif
232
233 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
234
235 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
236
237 omap_dss_stop_device(dssdev);
238}
239
240static int dpi_display_suspend(struct omap_dss_device *dssdev)
241{
242 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
243 return -EINVAL;
244
245 DSSDBG("dpi_display_suspend\n");
246
247 if (dssdev->driver->suspend)
248 dssdev->driver->suspend(dssdev);
249
250 dispc_enable_lcd_out(0);
251
252 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
253
254 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
255
256 return 0;
257}
258
259static int dpi_display_resume(struct omap_dss_device *dssdev)
260{
261 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
262 return -EINVAL;
263
264 DSSDBG("dpi_display_resume\n");
265
266 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
267
268 dispc_enable_lcd_out(1);
269
270 if (dssdev->driver->resume)
271 dssdev->driver->resume(dssdev);
272
273 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
274
275 return 0;
276}
277
278static void dpi_set_timings(struct omap_dss_device *dssdev,
279 struct omap_video_timings *timings)
280{
281 DSSDBG("dpi_set_timings\n");
282 dssdev->panel.timings = *timings;
283 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
284 dpi_set_mode(dssdev);
285 dispc_go(OMAP_DSS_CHANNEL_LCD);
286 }
287}
288
289static int dpi_check_timings(struct omap_dss_device *dssdev,
290 struct omap_video_timings *timings)
291{
292 bool is_tft;
293 int r;
294 int lck_div, pck_div;
295 unsigned long fck;
296 unsigned long pck;
297
298 if (!dispc_lcd_timings_ok(timings))
299 return -EINVAL;
300
301 if (timings->pixel_clock == 0)
302 return -EINVAL;
303
304 is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0;
305
306#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
307 {
308 struct dsi_clock_info dsi_cinfo;
309 struct dispc_clock_info dispc_cinfo;
310 r = dsi_pll_calc_clock_div_pck(is_tft,
311 timings->pixel_clock * 1000,
312 &dsi_cinfo, &dispc_cinfo);
313
314 if (r)
315 return r;
316
317 fck = dsi_cinfo.dsi1_pll_fclk;
318 lck_div = dispc_cinfo.lck_div;
319 pck_div = dispc_cinfo.pck_div;
320 }
321#else
322 {
323 struct dss_clock_info dss_cinfo;
324 struct dispc_clock_info dispc_cinfo;
325 r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000,
326 &dss_cinfo, &dispc_cinfo);
327
328 if (r)
329 return r;
330
331 fck = dss_cinfo.fck;
332 lck_div = dispc_cinfo.lck_div;
333 pck_div = dispc_cinfo.pck_div;
334 }
335#endif
336
337 pck = fck / lck_div / pck_div / 1000;
338
339 timings->pixel_clock = pck;
340
341 return 0;
342}
343
344static void dpi_get_timings(struct omap_dss_device *dssdev,
345 struct omap_video_timings *timings)
346{
347 *timings = dssdev->panel.timings;
348}
349
350static int dpi_display_set_update_mode(struct omap_dss_device *dssdev,
351 enum omap_dss_update_mode mode)
352{
353 if (mode == OMAP_DSS_UPDATE_MANUAL)
354 return -EINVAL;
355
356 if (mode == OMAP_DSS_UPDATE_DISABLED) {
357 dispc_enable_lcd_out(0);
358 dpi.update_enabled = 0;
359 } else {
360 dispc_enable_lcd_out(1);
361 dpi.update_enabled = 1;
362 }
363
364 return 0;
365}
366
367static enum omap_dss_update_mode dpi_display_get_update_mode(
368 struct omap_dss_device *dssdev)
369{
370 return dpi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
371 OMAP_DSS_UPDATE_DISABLED;
372}
373
374int dpi_init_display(struct omap_dss_device *dssdev)
375{
376 DSSDBG("init_display\n");
377
378 dssdev->enable = dpi_display_enable;
379 dssdev->disable = dpi_display_disable;
380 dssdev->suspend = dpi_display_suspend;
381 dssdev->resume = dpi_display_resume;
382 dssdev->set_timings = dpi_set_timings;
383 dssdev->check_timings = dpi_check_timings;
384 dssdev->get_timings = dpi_get_timings;
385 dssdev->set_update_mode = dpi_display_set_update_mode;
386 dssdev->get_update_mode = dpi_display_get_update_mode;
387
388 return 0;
389}
390
391int dpi_init(void)
392{
393 return 0;
394}
395
396void dpi_exit(void)
397{
398}
399
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
new file mode 100644
index 000000000000..5936487b5def
--- /dev/null
+++ b/drivers/video/omap2/dss/dsi.c
@@ -0,0 +1,3710 @@
1/*
2 * linux/drivers/video/omap2/dss/dsi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define DSS_SUBSYS_NAME "DSI"
21
22#include <linux/kernel.h>
23#include <linux/io.h>
24#include <linux/clk.h>
25#include <linux/device.h>
26#include <linux/err.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/mutex.h>
30#include <linux/seq_file.h>
31#include <linux/platform_device.h>
32#include <linux/regulator/consumer.h>
33#include <linux/kthread.h>
34#include <linux/wait.h>
35
36#include <plat/display.h>
37#include <plat/clock.h>
38
39#include "dss.h"
40
41/*#define VERBOSE_IRQ*/
42#define DSI_CATCH_MISSING_TE
43
44#define DSI_BASE 0x4804FC00
45
46struct dsi_reg { u16 idx; };
47
48#define DSI_REG(idx) ((const struct dsi_reg) { idx })
49
50#define DSI_SZ_REGS SZ_1K
51/* DSI Protocol Engine */
52
53#define DSI_REVISION DSI_REG(0x0000)
54#define DSI_SYSCONFIG DSI_REG(0x0010)
55#define DSI_SYSSTATUS DSI_REG(0x0014)
56#define DSI_IRQSTATUS DSI_REG(0x0018)
57#define DSI_IRQENABLE DSI_REG(0x001C)
58#define DSI_CTRL DSI_REG(0x0040)
59#define DSI_COMPLEXIO_CFG1 DSI_REG(0x0048)
60#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(0x004C)
61#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(0x0050)
62#define DSI_CLK_CTRL DSI_REG(0x0054)
63#define DSI_TIMING1 DSI_REG(0x0058)
64#define DSI_TIMING2 DSI_REG(0x005C)
65#define DSI_VM_TIMING1 DSI_REG(0x0060)
66#define DSI_VM_TIMING2 DSI_REG(0x0064)
67#define DSI_VM_TIMING3 DSI_REG(0x0068)
68#define DSI_CLK_TIMING DSI_REG(0x006C)
69#define DSI_TX_FIFO_VC_SIZE DSI_REG(0x0070)
70#define DSI_RX_FIFO_VC_SIZE DSI_REG(0x0074)
71#define DSI_COMPLEXIO_CFG2 DSI_REG(0x0078)
72#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(0x007C)
73#define DSI_VM_TIMING4 DSI_REG(0x0080)
74#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(0x0084)
75#define DSI_VM_TIMING5 DSI_REG(0x0088)
76#define DSI_VM_TIMING6 DSI_REG(0x008C)
77#define DSI_VM_TIMING7 DSI_REG(0x0090)
78#define DSI_STOPCLK_TIMING DSI_REG(0x0094)
79#define DSI_VC_CTRL(n) DSI_REG(0x0100 + ((n) * 0x20))
80#define DSI_VC_TE(n) DSI_REG(0x0104 + ((n) * 0x20))
81#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(0x0108 + ((n) * 0x20))
82#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(0x010C + ((n) * 0x20))
83#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(0x0110 + ((n) * 0x20))
84#define DSI_VC_IRQSTATUS(n) DSI_REG(0x0118 + ((n) * 0x20))
85#define DSI_VC_IRQENABLE(n) DSI_REG(0x011C + ((n) * 0x20))
86
87/* DSIPHY_SCP */
88
89#define DSI_DSIPHY_CFG0 DSI_REG(0x200 + 0x0000)
90#define DSI_DSIPHY_CFG1 DSI_REG(0x200 + 0x0004)
91#define DSI_DSIPHY_CFG2 DSI_REG(0x200 + 0x0008)
92#define DSI_DSIPHY_CFG5 DSI_REG(0x200 + 0x0014)
93
94/* DSI_PLL_CTRL_SCP */
95
96#define DSI_PLL_CONTROL DSI_REG(0x300 + 0x0000)
97#define DSI_PLL_STATUS DSI_REG(0x300 + 0x0004)
98#define DSI_PLL_GO DSI_REG(0x300 + 0x0008)
99#define DSI_PLL_CONFIGURATION1 DSI_REG(0x300 + 0x000C)
100#define DSI_PLL_CONFIGURATION2 DSI_REG(0x300 + 0x0010)
101
102#define REG_GET(idx, start, end) \
103 FLD_GET(dsi_read_reg(idx), start, end)
104
105#define REG_FLD_MOD(idx, val, start, end) \
106 dsi_write_reg(idx, FLD_MOD(dsi_read_reg(idx), val, start, end))
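/* REG_GET()/REG_FLD_MOD() are read-modify-write helpers built on the
 * FLD_GET()/FLD_MOD() bitfield macros from the shared DSS headers.
 * Illustrative use: REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20) reads
 * DSI_CLK_CTRL, sets bit 20 (LP_CLK_ENABLE) leaving all other bits
 * untouched, and writes the result back.
 */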
107
108/* Global interrupts */
109#define DSI_IRQ_VC0 (1 << 0)
110#define DSI_IRQ_VC1 (1 << 1)
111#define DSI_IRQ_VC2 (1 << 2)
112#define DSI_IRQ_VC3 (1 << 3)
113#define DSI_IRQ_WAKEUP (1 << 4)
114#define DSI_IRQ_RESYNC (1 << 5)
115#define DSI_IRQ_PLL_LOCK (1 << 7)
116#define DSI_IRQ_PLL_UNLOCK (1 << 8)
117#define DSI_IRQ_PLL_RECALL (1 << 9)
118#define DSI_IRQ_COMPLEXIO_ERR (1 << 10)
119#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14)
120#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15)
121#define DSI_IRQ_TE_TRIGGER (1 << 16)
122#define DSI_IRQ_ACK_TRIGGER (1 << 17)
123#define DSI_IRQ_SYNC_LOST (1 << 18)
124#define DSI_IRQ_LDO_POWER_GOOD (1 << 19)
125#define DSI_IRQ_TA_TIMEOUT (1 << 20)
126#define DSI_IRQ_ERROR_MASK \
127 (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
128 DSI_IRQ_TA_TIMEOUT)
129#define DSI_IRQ_CHANNEL_MASK 0xf
130
131/* Virtual channel interrupts */
132#define DSI_VC_IRQ_CS (1 << 0)
133#define DSI_VC_IRQ_ECC_CORR (1 << 1)
134#define DSI_VC_IRQ_PACKET_SENT (1 << 2)
135#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3)
136#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4)
137#define DSI_VC_IRQ_BTA (1 << 5)
138#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6)
139#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7)
140#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
141#define DSI_VC_IRQ_ERROR_MASK \
142 (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
143 DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
144 DSI_VC_IRQ_FIFO_TX_UDF)
145
146/* ComplexIO interrupts */
147#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0)
148#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1)
149#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2)
150#define DSI_CIO_IRQ_ERRESC1 (1 << 5)
151#define DSI_CIO_IRQ_ERRESC2 (1 << 6)
152#define DSI_CIO_IRQ_ERRESC3 (1 << 7)
153#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10)
154#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11)
155#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12)
156#define DSI_CIO_IRQ_STATEULPS1 (1 << 15)
157#define DSI_CIO_IRQ_STATEULPS2 (1 << 16)
158#define DSI_CIO_IRQ_STATEULPS3 (1 << 17)
159#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20)
160#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21)
161#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22)
162#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23)
163#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24)
164#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25)
165#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30)
166#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31)
167
168#define DSI_DT_DCS_SHORT_WRITE_0 0x05
169#define DSI_DT_DCS_SHORT_WRITE_1 0x15
170#define DSI_DT_DCS_READ 0x06
171#define DSI_DT_SET_MAX_RET_PKG_SIZE 0x37
172#define DSI_DT_NULL_PACKET 0x09
173#define DSI_DT_DCS_LONG_WRITE 0x39
174
175#define DSI_DT_RX_ACK_WITH_ERR 0x02
176#define DSI_DT_RX_DCS_LONG_READ 0x1c
177#define DSI_DT_RX_SHORT_READ_1 0x21
178#define DSI_DT_RX_SHORT_READ_2 0x22
179
180#define FINT_MAX 2100000
181#define FINT_MIN 750000
182#define REGN_MAX (1 << 7)
183#define REGM_MAX ((1 << 11) - 1)
184#define REGM3_MAX (1 << 4)
185#define REGM4_MAX (1 << 4)
186#define LP_DIV_MAX ((1 << 13) - 1)
187
188enum fifo_size {
189 DSI_FIFO_SIZE_0 = 0,
190 DSI_FIFO_SIZE_32 = 1,
191 DSI_FIFO_SIZE_64 = 2,
192 DSI_FIFO_SIZE_96 = 3,
193 DSI_FIFO_SIZE_128 = 4,
194};
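/* The enum value is a size multiplier: dsi_config_tx_fifo() and
 * dsi_config_rx_fifo() allot that many units out of a 4-unit FIFO, and
 * dsi_vc_send_long() converts a size to bytes as value * 32 * 4, i.e.
 * each unit appears to be 32 32-bit words (128 bytes).
 */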
195
196enum dsi_vc_mode {
197 DSI_VC_MODE_L4 = 0,
198 DSI_VC_MODE_VP,
199};
200
201struct dsi_update_region {
202 bool dirty;
203 u16 x, y, w, h;
204 struct omap_dss_device *device;
205};
206
207static struct
208{
209 void __iomem *base;
210
211 struct dsi_clock_info current_cinfo;
212
213 struct regulator *vdds_dsi_reg;
214
215 struct {
216 enum dsi_vc_mode mode;
217 struct omap_dss_device *dssdev;
218 enum fifo_size fifo_size;
219 int dest_per; /* destination peripheral 0-3 */
220 } vc[4];
221
222 struct mutex lock;
223 struct mutex bus_lock;
224
225 unsigned pll_locked;
226
227 struct completion bta_completion;
228
229 struct task_struct *thread;
230 wait_queue_head_t waitqueue;
231
232 spinlock_t update_lock;
233 bool framedone_received;
234 struct dsi_update_region update_region;
235 struct dsi_update_region active_update_region;
236 struct completion update_completion;
237
238 enum omap_dss_update_mode user_update_mode;
239 enum omap_dss_update_mode update_mode;
240 bool te_enabled;
241 bool use_ext_te;
242
243#ifdef DSI_CATCH_MISSING_TE
244 struct timer_list te_timer;
245#endif
246
247 unsigned long cache_req_pck;
248 unsigned long cache_clk_freq;
249 struct dsi_clock_info cache_cinfo;
250
251 u32 errors;
252 spinlock_t errors_lock;
253#ifdef DEBUG
254 ktime_t perf_setup_time;
255 ktime_t perf_start_time;
256 ktime_t perf_start_time_auto;
257 int perf_measure_frames;
258#endif
259 int debug_read;
260 int debug_write;
261} dsi;
262
263#ifdef DEBUG
264static bool dsi_perf;
265module_param_named(dsi_perf, dsi_perf, bool, 0644);
266#endif
267
268static inline void dsi_write_reg(const struct dsi_reg idx, u32 val)
269{
270 __raw_writel(val, dsi.base + idx.idx);
271}
272
273static inline u32 dsi_read_reg(const struct dsi_reg idx)
274{
275 return __raw_readl(dsi.base + idx.idx);
276}
277
278
279void dsi_save_context(void)
280{
281}
282
283void dsi_restore_context(void)
284{
285}
286
287void dsi_bus_lock(void)
288{
289 mutex_lock(&dsi.bus_lock);
290}
291EXPORT_SYMBOL(dsi_bus_lock);
292
293void dsi_bus_unlock(void)
294{
295 mutex_unlock(&dsi.bus_lock);
296}
297EXPORT_SYMBOL(dsi_bus_unlock);
298
299static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
300 int value)
301{
302 int t = 100000;
303
304 while (REG_GET(idx, bitnum, bitnum) != value) {
305 if (--t == 0)
306 return !value;
307 }
308
309 return value;
310}
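/* Note the return convention: the requested bit value is returned on
 * success and its logical negation on timeout, so callers treat
 * wait_for_bit_change(reg, bit, v) != v as failure. The loop is a
 * bounded busy-wait with no delay between polls.
 */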
311
312#ifdef DEBUG
313static void dsi_perf_mark_setup(void)
314{
315 dsi.perf_setup_time = ktime_get();
316}
317
318static void dsi_perf_mark_start(void)
319{
320 dsi.perf_start_time = ktime_get();
321}
322
323static void dsi_perf_mark_start_auto(void)
324{
325 dsi.perf_measure_frames = 0;
326 dsi.perf_start_time_auto = ktime_get();
327}
328
329static void dsi_perf_show(const char *name)
330{
331 ktime_t t, setup_time, trans_time;
332 u32 total_bytes;
333 u32 setup_us, trans_us, total_us;
334
335 if (!dsi_perf)
336 return;
337
338 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
339 return;
340
341 t = ktime_get();
342
343 setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
344 setup_us = (u32)ktime_to_us(setup_time);
345 if (setup_us == 0)
346 setup_us = 1;
347
348 trans_time = ktime_sub(t, dsi.perf_start_time);
349 trans_us = (u32)ktime_to_us(trans_time);
350 if (trans_us == 0)
351 trans_us = 1;
352
353 total_us = setup_us + trans_us;
354
355 total_bytes = dsi.active_update_region.w *
356 dsi.active_update_region.h *
357 dsi.active_update_region.device->ctrl.pixel_size / 8;
358
359 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
360 static u32 s_total_trans_us, s_total_setup_us;
361 static u32 s_min_trans_us = 0xffffffff, s_min_setup_us = 0xffffffff;
362 static u32 s_max_trans_us, s_max_setup_us;
363 const int numframes = 100;
364 ktime_t total_time_auto;
365 u32 total_time_auto_us;
366
367 dsi.perf_measure_frames++;
368
369 if (setup_us < s_min_setup_us)
370 s_min_setup_us = setup_us;
371
372 if (setup_us > s_max_setup_us)
373 s_max_setup_us = setup_us;
374
375 s_total_setup_us += setup_us;
376
377 if (trans_us < s_min_trans_us)
378 s_min_trans_us = trans_us;
379
380 if (trans_us > s_max_trans_us)
381 s_max_trans_us = trans_us;
382
383 s_total_trans_us += trans_us;
384
385 if (dsi.perf_measure_frames < numframes)
386 return;
387
388 total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
389 total_time_auto_us = (u32)ktime_to_us(total_time_auto);
390
391 printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
392 "trans %u/%u/%u\n",
393 name,
394 1000 * 1000 * numframes / total_time_auto_us,
395 s_min_setup_us,
396 s_max_setup_us,
397 s_total_setup_us / numframes,
398 s_min_trans_us,
399 s_max_trans_us,
400 s_total_trans_us / numframes);
401
402 s_total_setup_us = 0;
403 s_min_setup_us = 0xffffffff;
404 s_max_setup_us = 0;
405 s_total_trans_us = 0;
406 s_min_trans_us = 0xffffffff;
407 s_max_trans_us = 0;
408 dsi_perf_mark_start_auto();
409 } else {
410 printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
411 "%u bytes, %u kbytes/sec\n",
412 name,
413 setup_us,
414 trans_us,
415 total_us,
416 1000*1000 / total_us,
417 total_bytes,
418 total_bytes * 1000 / total_us);
419 }
420}
421#else
422#define dsi_perf_mark_setup()
423#define dsi_perf_mark_start()
424#define dsi_perf_mark_start_auto()
425#define dsi_perf_show(x)
426#endif
427
428static void print_irq_status(u32 status)
429{
430#ifndef VERBOSE_IRQ
431 if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
432 return;
433#endif
434 printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);
435
436#define PIS(x) \
437 if (status & DSI_IRQ_##x) \
438 printk(#x " ");
439#ifdef VERBOSE_IRQ
440 PIS(VC0);
441 PIS(VC1);
442 PIS(VC2);
443 PIS(VC3);
444#endif
445 PIS(WAKEUP);
446 PIS(RESYNC);
447 PIS(PLL_LOCK);
448 PIS(PLL_UNLOCK);
449 PIS(PLL_RECALL);
450 PIS(COMPLEXIO_ERR);
451 PIS(HS_TX_TIMEOUT);
452 PIS(LP_RX_TIMEOUT);
453 PIS(TE_TRIGGER);
454 PIS(ACK_TRIGGER);
455 PIS(SYNC_LOST);
456 PIS(LDO_POWER_GOOD);
457 PIS(TA_TIMEOUT);
458#undef PIS
459
460 printk("\n");
461}
462
463static void print_irq_status_vc(int channel, u32 status)
464{
465#ifndef VERBOSE_IRQ
466 if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
467 return;
468#endif
469 printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);
470
471#define PIS(x) \
472 if (status & DSI_VC_IRQ_##x) \
473 printk(#x " ");
474 PIS(CS);
475 PIS(ECC_CORR);
476#ifdef VERBOSE_IRQ
477 PIS(PACKET_SENT);
478#endif
479 PIS(FIFO_TX_OVF);
480 PIS(FIFO_RX_OVF);
481 PIS(BTA);
482 PIS(ECC_NO_CORR);
483 PIS(FIFO_TX_UDF);
484 PIS(PP_BUSY_CHANGE);
485#undef PIS
486 printk("\n");
487}
488
489static void print_irq_status_cio(u32 status)
490{
491 printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);
492
493#define PIS(x) \
494 if (status & DSI_CIO_IRQ_##x) \
495 printk(#x " ");
496 PIS(ERRSYNCESC1);
497 PIS(ERRSYNCESC2);
498 PIS(ERRSYNCESC3);
499 PIS(ERRESC1);
500 PIS(ERRESC2);
501 PIS(ERRESC3);
502 PIS(ERRCONTROL1);
503 PIS(ERRCONTROL2);
504 PIS(ERRCONTROL3);
505 PIS(STATEULPS1);
506 PIS(STATEULPS2);
507 PIS(STATEULPS3);
508 PIS(ERRCONTENTIONLP0_1);
509 PIS(ERRCONTENTIONLP1_1);
510 PIS(ERRCONTENTIONLP0_2);
511 PIS(ERRCONTENTIONLP1_2);
512 PIS(ERRCONTENTIONLP0_3);
513 PIS(ERRCONTENTIONLP1_3);
514 PIS(ULPSACTIVENOT_ALL0);
515 PIS(ULPSACTIVENOT_ALL1);
516#undef PIS
517
518 printk("\n");
519}
520
521static int debug_irq;
522
523/* called from dss */
524void dsi_irq_handler(void)
525{
526 u32 irqstatus, vcstatus, ciostatus;
527 int i;
528
529 irqstatus = dsi_read_reg(DSI_IRQSTATUS);
530
531 if (irqstatus & DSI_IRQ_ERROR_MASK) {
532 DSSERR("DSI error, irqstatus %x\n", irqstatus);
533 print_irq_status(irqstatus);
534 spin_lock(&dsi.errors_lock);
535 dsi.errors |= irqstatus & DSI_IRQ_ERROR_MASK;
536 spin_unlock(&dsi.errors_lock);
537 } else if (debug_irq) {
538 print_irq_status(irqstatus);
539 }
540
541#ifdef DSI_CATCH_MISSING_TE
542 if (irqstatus & DSI_IRQ_TE_TRIGGER)
543 del_timer(&dsi.te_timer);
544#endif
545
546 for (i = 0; i < 4; ++i) {
547 if ((irqstatus & (1<<i)) == 0)
548 continue;
549
550 vcstatus = dsi_read_reg(DSI_VC_IRQSTATUS(i));
551
552 if (vcstatus & DSI_VC_IRQ_BTA)
553 complete(&dsi.bta_completion);
554
555 if (vcstatus & DSI_VC_IRQ_ERROR_MASK) {
556 DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
557 i, vcstatus);
558 print_irq_status_vc(i, vcstatus);
559 } else if (debug_irq) {
560 print_irq_status_vc(i, vcstatus);
561 }
562
563 dsi_write_reg(DSI_VC_IRQSTATUS(i), vcstatus);
564 /* flush posted write */
565 dsi_read_reg(DSI_VC_IRQSTATUS(i));
566 }
567
568 if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
569 ciostatus = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
570
571 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
572 /* flush posted write */
573 dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
574
575 DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
576 print_irq_status_cio(ciostatus);
577 }
578
579 dsi_write_reg(DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
580 /* flush posted write */
581 dsi_read_reg(DSI_IRQSTATUS);
582}
583
584
585static void _dsi_initialize_irq(void)
586{
587 u32 l;
588 int i;
589
590 /* disable all interrupts */
591 dsi_write_reg(DSI_IRQENABLE, 0);
592 for (i = 0; i < 4; ++i)
593 dsi_write_reg(DSI_VC_IRQENABLE(i), 0);
594 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE, 0);
595
596 /* clear interrupt status */
597 l = dsi_read_reg(DSI_IRQSTATUS);
598 dsi_write_reg(DSI_IRQSTATUS, l & ~DSI_IRQ_CHANNEL_MASK);
599
600 for (i = 0; i < 4; ++i) {
601 l = dsi_read_reg(DSI_VC_IRQSTATUS(i));
602 dsi_write_reg(DSI_VC_IRQSTATUS(i), l);
603 }
604
605 l = dsi_read_reg(DSI_COMPLEXIO_IRQ_STATUS);
606 dsi_write_reg(DSI_COMPLEXIO_IRQ_STATUS, l);
607
608 /* enable error irqs */
609 l = DSI_IRQ_ERROR_MASK;
610#ifdef DSI_CATCH_MISSING_TE
611 l |= DSI_IRQ_TE_TRIGGER;
612#endif
613 dsi_write_reg(DSI_IRQENABLE, l);
614
615 l = DSI_VC_IRQ_ERROR_MASK;
616 for (i = 0; i < 4; ++i)
617 dsi_write_reg(DSI_VC_IRQENABLE(i), l);
618
619 /* XXX zonda responds incorrectly, causing control error:
620 Exit from LP-ESC mode to LP11 uses wrong transition states on the
621 data lines LP0 and LN0. */
622 dsi_write_reg(DSI_COMPLEXIO_IRQ_ENABLE,
623 -1 & (~DSI_CIO_IRQ_ERRCONTROL2));
624}
625
626static u32 dsi_get_errors(void)
627{
628 unsigned long flags;
629 u32 e;
630 spin_lock_irqsave(&dsi.errors_lock, flags);
631 e = dsi.errors;
632 dsi.errors = 0;
633 spin_unlock_irqrestore(&dsi.errors_lock, flags);
634 return e;
635}
636
637static void dsi_vc_enable_bta_irq(int channel)
638{
639 u32 l;
640
641 dsi_write_reg(DSI_VC_IRQSTATUS(channel), DSI_VC_IRQ_BTA);
642
643 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
644 l |= DSI_VC_IRQ_BTA;
645 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
646}
647
648static void dsi_vc_disable_bta_irq(int channel)
649{
650 u32 l;
651
652 l = dsi_read_reg(DSI_VC_IRQENABLE(channel));
653 l &= ~DSI_VC_IRQ_BTA;
654 dsi_write_reg(DSI_VC_IRQENABLE(channel), l);
655}
656
657/* DSI func clock. this could also be DSI2_PLL_FCLK */
658static inline void enable_clocks(bool enable)
659{
660 if (enable)
661 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
662 else
663 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
664}
665
666/* source clock for DSI PLL. this could also be PCLKFREE */
667static inline void dsi_enable_pll_clock(bool enable)
668{
669 if (enable)
670 dss_clk_enable(DSS_CLK_FCK2);
671 else
672 dss_clk_disable(DSS_CLK_FCK2);
673
674 if (enable && dsi.pll_locked) {
675 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1)
676 DSSERR("cannot lock PLL when enabling clocks\n");
677 }
678}
679
680#ifdef DEBUG
681static void _dsi_print_reset_status(void)
682{
683 u32 l;
684
685 if (!dss_debug)
686 return;
687
688 /* A dummy read using the SCP interface to any DSIPHY register is
689 * required after DSIPHY reset to complete the reset of the DSI complex
690 * I/O. */
691 l = dsi_read_reg(DSI_DSIPHY_CFG5);
692
693 printk(KERN_DEBUG "DSI resets: ");
694
695 l = dsi_read_reg(DSI_PLL_STATUS);
696 printk("PLL (%d) ", FLD_GET(l, 0, 0));
697
698 l = dsi_read_reg(DSI_COMPLEXIO_CFG1);
699 printk("CIO (%d) ", FLD_GET(l, 29, 29));
700
701 l = dsi_read_reg(DSI_DSIPHY_CFG5);
702 printk("PHY (%x, %d, %d, %d)\n",
703 FLD_GET(l, 28, 26),
704 FLD_GET(l, 29, 29),
705 FLD_GET(l, 30, 30),
706 FLD_GET(l, 31, 31));
707}
708#else
709#define _dsi_print_reset_status()
710#endif
711
712static inline int dsi_if_enable(bool enable)
713{
714 DSSDBG("dsi_if_enable(%d)\n", enable);
715
716 enable = enable ? 1 : 0;
717 REG_FLD_MOD(DSI_CTRL, enable, 0, 0); /* IF_EN */
718
719 if (wait_for_bit_change(DSI_CTRL, 0, enable) != enable) {
720 DSSERR("Failed to set dsi_if_enable to %d\n", enable);
721 return -EIO;
722 }
723
724 return 0;
725}
726
727unsigned long dsi_get_dsi1_pll_rate(void)
728{
729 return dsi.current_cinfo.dsi1_pll_fclk;
730}
731
732static unsigned long dsi_get_dsi2_pll_rate(void)
733{
734 return dsi.current_cinfo.dsi2_pll_fclk;
735}
736
737static unsigned long dsi_get_txbyteclkhs(void)
738{
739 return dsi.current_cinfo.clkin4ddr / 16;
740}
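/* Derivation: the DDR clock is clkin4ddr / 4 and each lane carries two
 * bits per DDR cycle, so the per-lane bit rate is clkin4ddr / 2 and the
 * HS byte clock is (clkin4ddr / 2) / 8 = clkin4ddr / 16.
 */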
741
742static unsigned long dsi_fclk_rate(void)
743{
744 unsigned long r;
745
746 if (dss_get_dsi_clk_source() == 0) {
747 /* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
748 r = dss_clk_get_rate(DSS_CLK_FCK1);
749 } else {
750 /* DSI FCLK source is DSI2_PLL_FCLK */
751 r = dsi_get_dsi2_pll_rate();
752 }
753
754 return r;
755}
756
757static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
758{
759 unsigned long dsi_fclk;
760 unsigned lp_clk_div;
761 unsigned long lp_clk;
762
763 lp_clk_div = dssdev->phy.dsi.div.lp_clk_div;
764
765 if (lp_clk_div == 0 || lp_clk_div > LP_DIV_MAX)
766 return -EINVAL;
767
768 dsi_fclk = dsi_fclk_rate();
769
770 lp_clk = dsi_fclk / 2 / lp_clk_div;
771
772 DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
773 dsi.current_cinfo.lp_clk = lp_clk;
774 dsi.current_cinfo.lp_clk_div = lp_clk_div;
775
776 REG_FLD_MOD(DSI_CLK_CTRL, lp_clk_div, 12, 0); /* LP_CLK_DIVISOR */
777
778 REG_FLD_MOD(DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0,
779 21, 21); /* LP_RX_SYNCHRO_ENABLE */
780
781 return 0;
782}
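/* Illustrative arithmetic (the input values are assumptions, not taken
 * from any board file): with dsi_fclk = 48 MHz and lp_clk_div = 3, the
 * resulting LP clock is 48000000 / 2 / 3 = 8 MHz.
 */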
783
784
785enum dsi_pll_power_state {
786 DSI_PLL_POWER_OFF = 0x0,
787 DSI_PLL_POWER_ON_HSCLK = 0x1,
788 DSI_PLL_POWER_ON_ALL = 0x2,
789 DSI_PLL_POWER_ON_DIV = 0x3,
790};
791
792static int dsi_pll_power(enum dsi_pll_power_state state)
793{
794 int t = 0;
795
796 REG_FLD_MOD(DSI_CLK_CTRL, state, 31, 30); /* PLL_PWR_CMD */
797
798 /* PLL_PWR_STATUS */
799 while (FLD_GET(dsi_read_reg(DSI_CLK_CTRL), 29, 28) != state) {
800 udelay(1);
801 if (t++ > 1000) {
802 DSSERR("Failed to set DSI PLL power mode to %d\n",
803 state);
804 return -ENODEV;
805 }
806 }
807
808 return 0;
809}
810
811/* calculate clock rates using dividers in cinfo */
812static int dsi_calc_clock_rates(struct dsi_clock_info *cinfo)
813{
814 if (cinfo->regn == 0 || cinfo->regn > REGN_MAX)
815 return -EINVAL;
816
817 if (cinfo->regm == 0 || cinfo->regm > REGM_MAX)
818 return -EINVAL;
819
820 if (cinfo->regm3 > REGM3_MAX)
821 return -EINVAL;
822
823 if (cinfo->regm4 > REGM4_MAX)
824 return -EINVAL;
825
826 if (cinfo->use_dss2_fck) {
827 cinfo->clkin = dss_clk_get_rate(DSS_CLK_FCK2);
828 /* XXX it is unclear if highfreq should be used
829 * with DSS2_FCK source also */
830 cinfo->highfreq = 0;
831 } else {
832 cinfo->clkin = dispc_pclk_rate();
833
834 if (cinfo->clkin < 32000000)
835 cinfo->highfreq = 0;
836 else
837 cinfo->highfreq = 1;
838 }
839
840 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
841
842 if (cinfo->fint > FINT_MAX || cinfo->fint < FINT_MIN)
843 return -EINVAL;
844
845 cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;
846
847 if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
848 return -EINVAL;
849
850 if (cinfo->regm3 > 0)
851 cinfo->dsi1_pll_fclk = cinfo->clkin4ddr / cinfo->regm3;
852 else
853 cinfo->dsi1_pll_fclk = 0;
854
855 if (cinfo->regm4 > 0)
856 cinfo->dsi2_pll_fclk = cinfo->clkin4ddr / cinfo->regm4;
857 else
858 cinfo->dsi2_pll_fclk = 0;
859
860 return 0;
861}
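/* Summary of the PLL relations enforced above:
 *
 *	Fint = clkin / (regn * (highfreq ? 2 : 1)),	0.75 - 2.1 MHz
 *	CLKIN4DDR = 2 * regm * Fint,			max 1.8 GHz
 *	dsi1_pll_fclk = CLKIN4DDR / regm3		(0 if unused)
 *	dsi2_pll_fclk = CLKIN4DDR / regm4		(0 if unused)
 */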
862
863int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
864 struct dsi_clock_info *dsi_cinfo,
865 struct dispc_clock_info *dispc_cinfo)
866{
867 struct dsi_clock_info cur, best;
868 struct dispc_clock_info best_dispc;
869 int min_fck_per_pck;
870 int match = 0;
871 unsigned long dss_clk_fck2;
872
873 dss_clk_fck2 = dss_clk_get_rate(DSS_CLK_FCK2);
874
875 if (req_pck == dsi.cache_req_pck &&
876 dsi.cache_cinfo.clkin == dss_clk_fck2) {
877 DSSDBG("DSI clock info found from cache\n");
878 *dsi_cinfo = dsi.cache_cinfo;
879 dispc_find_clk_divs(is_tft, req_pck, dsi_cinfo->dsi1_pll_fclk,
880 dispc_cinfo);
881 return 0;
882 }
883
884 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
885
886 if (min_fck_per_pck &&
887 req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
888 DSSERR("Requested pixel clock not possible with the current "
889 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
890 "the constraint off.\n");
891 min_fck_per_pck = 0;
892 }
893
894 DSSDBG("dsi_pll_calc\n");
895
896retry:
897 memset(&best, 0, sizeof(best));
898 memset(&best_dispc, 0, sizeof(best_dispc));
899
900 memset(&cur, 0, sizeof(cur));
901 cur.clkin = dss_clk_fck2;
902 cur.use_dss2_fck = 1;
903 cur.highfreq = 0;
904
905 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
906 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
907 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
908 for (cur.regn = 1; cur.regn < REGN_MAX; ++cur.regn) {
909 if (cur.highfreq == 0)
910 cur.fint = cur.clkin / cur.regn;
911 else
912 cur.fint = cur.clkin / (2 * cur.regn);
913
914 if (cur.fint > FINT_MAX || cur.fint < FINT_MIN)
915 continue;
916
917 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
918 for (cur.regm = 1; cur.regm < REGM_MAX; ++cur.regm) {
919 unsigned long a, b;
920
921 a = 2 * cur.regm * (cur.clkin/1000);
922 b = cur.regn * (cur.highfreq + 1);
923 cur.clkin4ddr = a / b * 1000;
924
925 if (cur.clkin4ddr > 1800 * 1000 * 1000)
926 break;
927
928 /* DSI1_PLL_FCLK(MHz) = DSIPHY(MHz) / regm3 < 173MHz */
929 for (cur.regm3 = 1; cur.regm3 < REGM3_MAX;
930 ++cur.regm3) {
931 struct dispc_clock_info cur_dispc;
932 cur.dsi1_pll_fclk = cur.clkin4ddr / cur.regm3;
933
934 /* this will narrow down the search a bit,
935 * but still give pixclocks below what was
936 * requested */
937 if (cur.dsi1_pll_fclk < req_pck)
938 break;
939
940 if (cur.dsi1_pll_fclk > DISPC_MAX_FCK)
941 continue;
942
943 if (min_fck_per_pck &&
944 cur.dsi1_pll_fclk <
945 req_pck * min_fck_per_pck)
946 continue;
947
948 match = 1;
949
950 dispc_find_clk_divs(is_tft, req_pck,
951 cur.dsi1_pll_fclk,
952 &cur_dispc);
953
954 if (abs(cur_dispc.pck - req_pck) <
955 abs(best_dispc.pck - req_pck)) {
956 best = cur;
957 best_dispc = cur_dispc;
958
959 if (cur_dispc.pck == req_pck)
960 goto found;
961 }
962 }
963 }
964 }
965found:
966 if (!match) {
967 if (min_fck_per_pck) {
968 DSSERR("Could not find suitable clock settings.\n"
969 "Turning FCK/PCK constraint off and "
970 "trying again.\n");
971 min_fck_per_pck = 0;
972 goto retry;
973 }
974
975 DSSERR("Could not find suitable clock settings.\n");
976
977 return -EINVAL;
978 }
979
980 /* DSI2_PLL_FCLK (regm4) is not used */
981 best.regm4 = 0;
982 best.dsi2_pll_fclk = 0;
983
984 if (dsi_cinfo)
985 *dsi_cinfo = best;
986 if (dispc_cinfo)
987 *dispc_cinfo = best_dispc;
988
989 dsi.cache_req_pck = req_pck;
990 dsi.cache_clk_freq = 0;
991 dsi.cache_cinfo = best;
992
993 return 0;
994}
995
996int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo)
997{
998 int r = 0;
999 u32 l;
1000 int f;
1001
1002 DSSDBGF();
1003
1004 dsi.current_cinfo.fint = cinfo->fint;
1005 dsi.current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1006 dsi.current_cinfo.dsi1_pll_fclk = cinfo->dsi1_pll_fclk;
1007 dsi.current_cinfo.dsi2_pll_fclk = cinfo->dsi2_pll_fclk;
1008
1009 dsi.current_cinfo.regn = cinfo->regn;
1010 dsi.current_cinfo.regm = cinfo->regm;
1011 dsi.current_cinfo.regm3 = cinfo->regm3;
1012 dsi.current_cinfo.regm4 = cinfo->regm4;
1013
1014 DSSDBG("DSI Fint %lu\n", cinfo->fint);
1015
1016 DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
1017 cinfo->use_dss2_fck ? "dss2_fck" : "pclkfree",
1018 cinfo->clkin,
1019 cinfo->highfreq);
1020
1021 /* DSIPHY == CLKIN4DDR */
1022 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
1023 cinfo->regm,
1024 cinfo->regn,
1025 cinfo->clkin,
1026 cinfo->highfreq + 1,
1027 cinfo->clkin4ddr);
1028
1029 DSSDBG("Data rate on 1 DSI lane %lu Mbps\n",
1030 cinfo->clkin4ddr / 1000 / 1000 / 2);
1031
1032 DSSDBG("Clock lane freq %lu Hz\n", cinfo->clkin4ddr / 4);
1033
1034 DSSDBG("regm3 = %d, dsi1_pll_fclk = %lu\n",
1035 cinfo->regm3, cinfo->dsi1_pll_fclk);
1036 DSSDBG("regm4 = %d, dsi2_pll_fclk = %lu\n",
1037 cinfo->regm4, cinfo->dsi2_pll_fclk);
1038
1039 REG_FLD_MOD(DSI_PLL_CONTROL, 0, 0, 0); /* DSI_PLL_AUTOMODE = manual */
1040
1041 l = dsi_read_reg(DSI_PLL_CONFIGURATION1);
1042 l = FLD_MOD(l, 1, 0, 0); /* DSI_PLL_STOPMODE */
1043 l = FLD_MOD(l, cinfo->regn - 1, 7, 1); /* DSI_PLL_REGN */
1044 l = FLD_MOD(l, cinfo->regm, 18, 8); /* DSI_PLL_REGM */
1045 l = FLD_MOD(l, cinfo->regm3 > 0 ? cinfo->regm3 - 1 : 0,
1046 22, 19); /* DSI_CLOCK_DIV */
1047 l = FLD_MOD(l, cinfo->regm4 > 0 ? cinfo->regm4 - 1 : 0,
1048 26, 23); /* DSIPROTO_CLOCK_DIV */
1049 dsi_write_reg(DSI_PLL_CONFIGURATION1, l);
1050
1051 BUG_ON(cinfo->fint < FINT_MIN || cinfo->fint > FINT_MAX);
1052 if (cinfo->fint < 1000000)
1053 f = 0x3;
1054 else if (cinfo->fint < 1250000)
1055 f = 0x4;
1056 else if (cinfo->fint < 1500000)
1057 f = 0x5;
1058 else if (cinfo->fint < 1750000)
1059 f = 0x6;
1060 else
1061 f = 0x7;
1062
1063 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1064 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1065 l = FLD_MOD(l, cinfo->use_dss2_fck ? 0 : 1,
1066 11, 11); /* DSI_PLL_CLKSEL */
1067 l = FLD_MOD(l, cinfo->highfreq,
1068 12, 12); /* DSI_PLL_HIGHFREQ */
1069 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1070 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1071 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
1072 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1073
1074 REG_FLD_MOD(DSI_PLL_GO, 1, 0, 0); /* DSI_PLL_GO */
1075
1076 if (wait_for_bit_change(DSI_PLL_GO, 0, 0) != 0) {
1077 DSSERR("dsi pll go bit not going down.\n");
1078 r = -EIO;
1079 goto err;
1080 }
1081
1082 if (wait_for_bit_change(DSI_PLL_STATUS, 1, 1) != 1) {
1083 DSSERR("cannot lock PLL\n");
1084 r = -EIO;
1085 goto err;
1086 }
1087
1088 dsi.pll_locked = 1;
1089
1090 l = dsi_read_reg(DSI_PLL_CONFIGURATION2);
1091 l = FLD_MOD(l, 0, 0, 0); /* DSI_PLL_IDLE */
1092 l = FLD_MOD(l, 0, 5, 5); /* DSI_PLL_PLLLPMODE */
1093 l = FLD_MOD(l, 0, 6, 6); /* DSI_PLL_LOWCURRSTBY */
1094 l = FLD_MOD(l, 0, 7, 7); /* DSI_PLL_TIGHTPHASELOCK */
1095 l = FLD_MOD(l, 0, 8, 8); /* DSI_PLL_DRIFTGUARDEN */
1096 l = FLD_MOD(l, 0, 10, 9); /* DSI_PLL_LOCKSEL */
1097 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1098 l = FLD_MOD(l, 1, 14, 14); /* DSIPHY_CLKINEN */
1099 l = FLD_MOD(l, 0, 15, 15); /* DSI_BYPASSEN */
1100 l = FLD_MOD(l, 1, 16, 16); /* DSS_CLOCK_EN */
1101 l = FLD_MOD(l, 0, 17, 17); /* DSS_CLOCK_PWDN */
1102 l = FLD_MOD(l, 1, 18, 18); /* DSI_PROTO_CLOCK_EN */
1103 l = FLD_MOD(l, 0, 19, 19); /* DSI_PROTO_CLOCK_PWDN */
1104 l = FLD_MOD(l, 0, 20, 20); /* DSI_HSDIVBYPASS */
1105 dsi_write_reg(DSI_PLL_CONFIGURATION2, l);
1106
1107 DSSDBG("PLL config done\n");
1108err:
1109 return r;
1110}
1111
1112int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
1113 bool enable_hsdiv)
1114{
1115 int r = 0;
1116 enum dsi_pll_power_state pwstate;
1117
1118 DSSDBG("PLL init\n");
1119
1120 enable_clocks(1);
1121 dsi_enable_pll_clock(1);
1122
1123 r = regulator_enable(dsi.vdds_dsi_reg);
1124 if (r)
1125 goto err0;
1126
1127 /* XXX PLL does not come out of reset without this... */
1128 dispc_pck_free_enable(1);
1129
1130 if (wait_for_bit_change(DSI_PLL_STATUS, 0, 1) != 1) {
1131 DSSERR("PLL not coming out of reset.\n");
1132 r = -ENODEV;
1133 goto err1;
1134 }
1135
1136 /* XXX ... but if left on, we get problems when planes do not
1137 * fill the whole display. No idea about this */
1138 dispc_pck_free_enable(0);
1139
1140 if (enable_hsclk && enable_hsdiv)
1141 pwstate = DSI_PLL_POWER_ON_ALL;
1142 else if (enable_hsclk)
1143 pwstate = DSI_PLL_POWER_ON_HSCLK;
1144 else if (enable_hsdiv)
1145 pwstate = DSI_PLL_POWER_ON_DIV;
1146 else
1147 pwstate = DSI_PLL_POWER_OFF;
1148
1149 r = dsi_pll_power(pwstate);
1150
1151 if (r)
1152 goto err1;
1153
1154 DSSDBG("PLL init done\n");
1155
1156 return 0;
1157err1:
1158 regulator_disable(dsi.vdds_dsi_reg);
1159err0:
1160 enable_clocks(0);
1161 dsi_enable_pll_clock(0);
1162 return r;
1163}
1164
1165void dsi_pll_uninit(void)
1166{
1167 enable_clocks(0);
1168 dsi_enable_pll_clock(0);
1169
1170 dsi.pll_locked = 0;
1171 dsi_pll_power(DSI_PLL_POWER_OFF);
1172 regulator_disable(dsi.vdds_dsi_reg);
1173 DSSDBG("PLL uninit done\n");
1174}
1175
1176void dsi_dump_clocks(struct seq_file *s)
1177{
1178 int clksel;
1179 struct dsi_clock_info *cinfo = &dsi.current_cinfo;
1180
1181 enable_clocks(1);
1182
1183 clksel = REG_GET(DSI_PLL_CONFIGURATION2, 11, 11);
1184
1185 seq_printf(s, "- DSI PLL -\n");
1186
1187 seq_printf(s, "dsi pll source = %s\n",
1188 clksel == 0 ?
1189 "dss2_alwon_fclk" : "pclkfree");
1190
1191 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1192
1193 seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
1194 cinfo->clkin4ddr, cinfo->regm);
1195
1196 seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
1197 cinfo->dsi1_pll_fclk,
1198 cinfo->regm3,
1199 dss_get_dispc_clk_source() == 0 ? "off" : "on");
1200
1201 seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
1202 cinfo->dsi2_pll_fclk,
1203 cinfo->regm4,
1204 dss_get_dsi_clk_source() == 0 ? "off" : "on");
1205
1206 seq_printf(s, "- DSI -\n");
1207
1208 seq_printf(s, "dsi fclk source = %s\n",
1209 dss_get_dsi_clk_source() == 0 ?
1210 "dss1_alwon_fclk" : "dsi2_pll_fclk");
1211
1212 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
1213
1214 seq_printf(s, "DDR_CLK\t\t%lu\n",
1215 cinfo->clkin4ddr / 4);
1216
1217 seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs());
1218
1219 seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);
1220
1221 seq_printf(s, "VP_CLK\t\t%lu\n"
1222 "VP_PCLK\t\t%lu\n",
1223 dispc_lclk_rate(),
1224 dispc_pclk_rate());
1225
1226 enable_clocks(0);
1227}
1228
1229void dsi_dump_regs(struct seq_file *s)
1230{
1231#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(r))
1232
1233 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
1234
1235 DUMPREG(DSI_REVISION);
1236 DUMPREG(DSI_SYSCONFIG);
1237 DUMPREG(DSI_SYSSTATUS);
1238 DUMPREG(DSI_IRQSTATUS);
1239 DUMPREG(DSI_IRQENABLE);
1240 DUMPREG(DSI_CTRL);
1241 DUMPREG(DSI_COMPLEXIO_CFG1);
1242 DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
1243 DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
1244 DUMPREG(DSI_CLK_CTRL);
1245 DUMPREG(DSI_TIMING1);
1246 DUMPREG(DSI_TIMING2);
1247 DUMPREG(DSI_VM_TIMING1);
1248 DUMPREG(DSI_VM_TIMING2);
1249 DUMPREG(DSI_VM_TIMING3);
1250 DUMPREG(DSI_CLK_TIMING);
1251 DUMPREG(DSI_TX_FIFO_VC_SIZE);
1252 DUMPREG(DSI_RX_FIFO_VC_SIZE);
1253 DUMPREG(DSI_COMPLEXIO_CFG2);
1254 DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
1255 DUMPREG(DSI_VM_TIMING4);
1256 DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
1257 DUMPREG(DSI_VM_TIMING5);
1258 DUMPREG(DSI_VM_TIMING6);
1259 DUMPREG(DSI_VM_TIMING7);
1260 DUMPREG(DSI_STOPCLK_TIMING);
1261
1262 DUMPREG(DSI_VC_CTRL(0));
1263 DUMPREG(DSI_VC_TE(0));
1264 DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
1265 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
1266 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
1267 DUMPREG(DSI_VC_IRQSTATUS(0));
1268 DUMPREG(DSI_VC_IRQENABLE(0));
1269
1270 DUMPREG(DSI_VC_CTRL(1));
1271 DUMPREG(DSI_VC_TE(1));
1272 DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
1273 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
1274 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
1275 DUMPREG(DSI_VC_IRQSTATUS(1));
1276 DUMPREG(DSI_VC_IRQENABLE(1));
1277
1278 DUMPREG(DSI_VC_CTRL(2));
1279 DUMPREG(DSI_VC_TE(2));
1280 DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
1281 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
1282 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
1283 DUMPREG(DSI_VC_IRQSTATUS(2));
1284 DUMPREG(DSI_VC_IRQENABLE(2));
1285
1286 DUMPREG(DSI_VC_CTRL(3));
1287 DUMPREG(DSI_VC_TE(3));
1288 DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
1289 DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
1290 DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
1291 DUMPREG(DSI_VC_IRQSTATUS(3));
1292 DUMPREG(DSI_VC_IRQENABLE(3));
1293
1294 DUMPREG(DSI_DSIPHY_CFG0);
1295 DUMPREG(DSI_DSIPHY_CFG1);
1296 DUMPREG(DSI_DSIPHY_CFG2);
1297 DUMPREG(DSI_DSIPHY_CFG5);
1298
1299 DUMPREG(DSI_PLL_CONTROL);
1300 DUMPREG(DSI_PLL_STATUS);
1301 DUMPREG(DSI_PLL_GO);
1302 DUMPREG(DSI_PLL_CONFIGURATION1);
1303 DUMPREG(DSI_PLL_CONFIGURATION2);
1304
1305 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
1306#undef DUMPREG
1307}
1308
1309enum dsi_complexio_power_state {
1310 DSI_COMPLEXIO_POWER_OFF = 0x0,
1311 DSI_COMPLEXIO_POWER_ON = 0x1,
1312 DSI_COMPLEXIO_POWER_ULPS = 0x2,
1313};
1314
1315static int dsi_complexio_power(enum dsi_complexio_power_state state)
1316{
1317 int t = 0;
1318
1319 /* PWR_CMD */
1320 REG_FLD_MOD(DSI_COMPLEXIO_CFG1, state, 28, 27);
1321
1322 /* PWR_STATUS */
1323 while (FLD_GET(dsi_read_reg(DSI_COMPLEXIO_CFG1), 26, 25) != state) {
1324 udelay(1);
1325 if (t++ > 1000) {
1326 DSSERR("failed to set complexio power state to "
1327 "%d\n", state);
1328 return -ENODEV;
1329 }
1330 }
1331
1332 return 0;
1333}
1334
1335static void dsi_complexio_config(struct omap_dss_device *dssdev)
1336{
1337 u32 r;
1338
1339 int clk_lane = dssdev->phy.dsi.clk_lane;
1340 int data1_lane = dssdev->phy.dsi.data1_lane;
1341 int data2_lane = dssdev->phy.dsi.data2_lane;
1342 int clk_pol = dssdev->phy.dsi.clk_pol;
1343 int data1_pol = dssdev->phy.dsi.data1_pol;
1344 int data2_pol = dssdev->phy.dsi.data2_pol;
1345
1346 r = dsi_read_reg(DSI_COMPLEXIO_CFG1);
1347 r = FLD_MOD(r, clk_lane, 2, 0);
1348 r = FLD_MOD(r, clk_pol, 3, 3);
1349 r = FLD_MOD(r, data1_lane, 6, 4);
1350 r = FLD_MOD(r, data1_pol, 7, 7);
1351 r = FLD_MOD(r, data2_lane, 10, 8);
1352 r = FLD_MOD(r, data2_pol, 11, 11);
1353 dsi_write_reg(DSI_COMPLEXIO_CFG1, r);
1354
1355 /* The configuration of the DSI complex I/O (number of data lanes,
1356 position, differential order) should not be changed while
1357 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
1358 the hardware to take into account a new configuration of the complex
1359 I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
1360 follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
1361 then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
1362 DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
1363 DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
1364 DSI complex I/O configuration is unknown. */
1365
1366 /*
1367 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1368 REG_FLD_MOD(DSI_CTRL, 0, 0, 0);
1369 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20);
1370 REG_FLD_MOD(DSI_CTRL, 1, 0, 0);
1371 */
1372}
1373
1374static inline unsigned ns2ddr(unsigned ns)
1375{
1376 /* convert time in ns to ddr ticks, rounding up */
1377 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
1378 return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
1379}
1380
1381static inline unsigned ddr2ns(unsigned ddr)
1382{
1383 unsigned long ddr_clk = dsi.current_cinfo.clkin4ddr / 4;
1384 return ddr * 1000 * 1000 / (ddr_clk / 1000);
1385}
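/* Worked example with illustrative numbers: for clkin4ddr = 300 MHz the
 * DDR clock is 75 MHz, so ns2ddr(70) = (70 * 75 + 999) / 1000 = 6 ticks
 * (rounded up) and ddr2ns(6) = 80 ns.
 */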
1386
1387static void dsi_complexio_timings(void)
1388{
1389 u32 r;
1390 u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
1391 u32 tlpx_half, tclk_trail, tclk_zero;
1392 u32 tclk_prepare;
1393
1394 /* calculate timings */
1395
1396 /* 1 * DDR_CLK = 2 * UI */
1397
1398 /* min 40ns + 4*UI max 85ns + 6*UI */
1399 ths_prepare = ns2ddr(70) + 2;
1400
1401 /* min 145ns + 10*UI */
1402 ths_prepare_ths_zero = ns2ddr(175) + 2;
1403
1404 /* min max(8*UI, 60ns+4*UI) */
1405 ths_trail = ns2ddr(60) + 5;
1406
1407 /* min 100ns */
1408 ths_exit = ns2ddr(145);
1409
1410 /* tlpx min 50ns */
1411 tlpx_half = ns2ddr(25);
1412
1413 /* min 60ns */
1414 tclk_trail = ns2ddr(60) + 2;
1415
1416 /* min 38ns, max 95ns */
1417 tclk_prepare = ns2ddr(65);
1418
1419 /* min tclk-prepare + tclk-zero = 300ns */
1420 tclk_zero = ns2ddr(260);
1421
1422 DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
1423 ths_prepare, ddr2ns(ths_prepare),
1424 ths_prepare_ths_zero, ddr2ns(ths_prepare_ths_zero));
1425 DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
1426 ths_trail, ddr2ns(ths_trail),
1427 ths_exit, ddr2ns(ths_exit));
1428
1429 DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
1430 "tclk_zero %u (%uns)\n",
1431 tlpx_half, ddr2ns(tlpx_half),
1432 tclk_trail, ddr2ns(tclk_trail),
1433 tclk_zero, ddr2ns(tclk_zero));
1434 DSSDBG("tclk_prepare %u (%uns)\n",
1435 tclk_prepare, ddr2ns(tclk_prepare));
1436
1437 /* program timings */
1438
1439 r = dsi_read_reg(DSI_DSIPHY_CFG0);
1440 r = FLD_MOD(r, ths_prepare, 31, 24);
1441 r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
1442 r = FLD_MOD(r, ths_trail, 15, 8);
1443 r = FLD_MOD(r, ths_exit, 7, 0);
1444 dsi_write_reg(DSI_DSIPHY_CFG0, r);
1445
1446 r = dsi_read_reg(DSI_DSIPHY_CFG1);
1447 r = FLD_MOD(r, tlpx_half, 22, 16);
1448 r = FLD_MOD(r, tclk_trail, 15, 8);
1449 r = FLD_MOD(r, tclk_zero, 7, 0);
1450 dsi_write_reg(DSI_DSIPHY_CFG1, r);
1451
1452 r = dsi_read_reg(DSI_DSIPHY_CFG2);
1453 r = FLD_MOD(r, tclk_prepare, 7, 0);
1454 dsi_write_reg(DSI_DSIPHY_CFG2, r);
1455}
1456
1457
1458static int dsi_complexio_init(struct omap_dss_device *dssdev)
1459{
1460 int r = 0;
1461
1462 DSSDBG("dsi_complexio_init\n");
1463
1464 /* CIO_CLK_ICG, enable L3 clk to CIO */
1465 REG_FLD_MOD(DSI_CLK_CTRL, 1, 14, 14);
1466
1467 /* A dummy read using the SCP interface to any DSIPHY register is
1468 * required after DSIPHY reset to complete the reset of the DSI complex
1469 * I/O. */
1470 dsi_read_reg(DSI_DSIPHY_CFG5);
1471
1472 if (wait_for_bit_change(DSI_DSIPHY_CFG5, 30, 1) != 1) {
1473 DSSERR("ComplexIO PHY not coming out of reset.\n");
1474 r = -ENODEV;
1475 goto err;
1476 }
1477
1478 dsi_complexio_config(dssdev);
1479
1480 r = dsi_complexio_power(DSI_COMPLEXIO_POWER_ON);
1481
1482 if (r)
1483 goto err;
1484
1485 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
1486 DSSERR("ComplexIO not coming out of reset.\n");
1487 r = -ENODEV;
1488 goto err;
1489 }
1490
1491 if (wait_for_bit_change(DSI_COMPLEXIO_CFG1, 21, 1) != 1) {
1492 DSSERR("ComplexIO LDO power down.\n");
1493 r = -ENODEV;
1494 goto err;
1495 }
1496
1497 dsi_complexio_timings();
1498
1499 /*
1500 The configuration of the DSI complex I/O (number of data lanes,
1501 position, differential order) should not be changed while
1502 DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. For the
1503 hardware to recognize a new configuration of the complex I/O (done
1504 in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to follow
1505 this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1, next
1506 reset the DSS.DSI_CTRL[0] IF_EN to 0, then set DSS.DSI_CLK_CTRL[20]
1507 LP_CLK_ENABLE to 1, and finally, set again the DSS.DSI_CTRL[0] IF_EN
1508 bit to 1. If the sequence is not followed, the DSI complex I/O
1509 configuration is undetermined.
1510 */
1511 dsi_if_enable(1);
1512 dsi_if_enable(0);
1513 REG_FLD_MOD(DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
1514 dsi_if_enable(1);
1515 dsi_if_enable(0);
1516
1517 DSSDBG("CIO init done\n");
1518err:
1519 return r;
1520}
1521
1522static void dsi_complexio_uninit(void)
1523{
1524 dsi_complexio_power(DSI_COMPLEXIO_POWER_OFF);
1525}
1526
1527static int _dsi_wait_reset(void)
1528{
1529 int i = 0;
1530
1531 while (REG_GET(DSI_SYSSTATUS, 0, 0) == 0) {
1532 if (i++ > 5) {
1533 DSSERR("soft reset failed\n");
1534 return -ENODEV;
1535 }
1536 udelay(1);
1537 }
1538
1539 return 0;
1540}
1541
1542static int _dsi_reset(void)
1543{
1544 /* Soft reset */
1545 REG_FLD_MOD(DSI_SYSCONFIG, 1, 1, 1);
1546 return _dsi_wait_reset();
1547}
1548
1549static void dsi_reset_tx_fifo(int channel)
1550{
1551 u32 mask;
1552 u32 l;
1553
1554 /* set fifosize of the channel to 0, then restore the old size */
1555 l = dsi_read_reg(DSI_TX_FIFO_VC_SIZE);
1556
1557 mask = FLD_MASK((8 * channel) + 7, (8 * channel) + 4);
1558 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l & ~mask);
1559
1560 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, l);
1561}
1562
1563static void dsi_config_tx_fifo(enum fifo_size size1, enum fifo_size size2,
1564 enum fifo_size size3, enum fifo_size size4)
1565{
1566 u32 r = 0;
1567 int add = 0;
1568 int i;
1569
1570 dsi.vc[0].fifo_size = size1;
1571 dsi.vc[1].fifo_size = size2;
1572 dsi.vc[2].fifo_size = size3;
1573 dsi.vc[3].fifo_size = size4;
1574
1575 for (i = 0; i < 4; i++) {
1576 u8 v;
1577 int size = dsi.vc[i].fifo_size;
1578
1579 if (add + size > 4) {
1580 DSSERR("Illegal FIFO configuration\n");
1581 BUG();
1582 }
1583
1584 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1585 r |= v << (8 * i);
1586 /*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
1587 add += size;
1588 }
1589
1590 dsi_write_reg(DSI_TX_FIFO_VC_SIZE, r);
1591}
1592
1593static void dsi_config_rx_fifo(enum fifo_size size1, enum fifo_size size2,
1594 enum fifo_size size3, enum fifo_size size4)
1595{
1596 u32 r = 0;
1597 int add = 0;
1598 int i;
1599
1600 dsi.vc[0].fifo_size = size1;
1601 dsi.vc[1].fifo_size = size2;
1602 dsi.vc[2].fifo_size = size3;
1603 dsi.vc[3].fifo_size = size4;
1604
1605 for (i = 0; i < 4; i++) {
1606 u8 v;
1607 int size = dsi.vc[i].fifo_size;
1608
1609 if (add + size > 4) {
1610 DSSERR("Illegal FIFO configuration\n");
1611 BUG();
1612 }
1613
1614 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
1615 r |= v << (8 * i);
1616 /*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
1617 add += size;
1618 }
1619
1620 dsi_write_reg(DSI_RX_FIFO_VC_SIZE, r);
1621}
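/* Example configuration (a sketch): an even split gives each virtual
 * channel one quarter of the FIFO:
 *
 *	dsi_config_tx_fifo(DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32,
 *			DSI_FIFO_SIZE_32, DSI_FIFO_SIZE_32);
 *
 * Note that both config functions store into the same vc[i].fifo_size
 * fields, so a later RX configuration overwrites the sizes that
 * dsi_vc_send_long() uses for its TX length check.
 */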
1622
1623static int dsi_force_tx_stop_mode_io(void)
1624{
1625 u32 r;
1626
1627 r = dsi_read_reg(DSI_TIMING1);
1628 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
1629 dsi_write_reg(DSI_TIMING1, r);
1630
1631 if (wait_for_bit_change(DSI_TIMING1, 15, 0) != 0) {
1632 DSSERR("TX_STOP bit not going down\n");
1633 return -EIO;
1634 }
1635
1636 return 0;
1637}
1638
1639static void dsi_vc_print_status(int channel)
1640{
1641 u32 r;
1642
1643 r = dsi_read_reg(DSI_VC_CTRL(channel));
1644 DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
1645 "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
1646 channel,
1647 FLD_GET(r, 5, 5),
1648 FLD_GET(r, 6, 6),
1649 FLD_GET(r, 15, 15),
1650 FLD_GET(r, 16, 16),
1651 FLD_GET(r, 20, 20));
1652
1653 r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
1654 DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
1655}
1656
1657static int dsi_vc_enable(int channel, bool enable)
1658{
1659 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
1660 DSSDBG("dsi_vc_enable channel %d, enable %d\n",
1661 channel, enable);
1662
1663 enable = enable ? 1 : 0;
1664
1665 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 0, 0);
1666
1667 if (wait_for_bit_change(DSI_VC_CTRL(channel), 0, enable) != enable) {
1668 DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
1669 return -EIO;
1670 }
1671
1672 return 0;
1673}
1674
1675static void dsi_vc_initial_config(int channel)
1676{
1677 u32 r;
1678
1679 DSSDBGF("%d", channel);
1680
1681 r = dsi_read_reg(DSI_VC_CTRL(channel));
1682
1683 if (FLD_GET(r, 15, 15)) /* VC_BUSY */
1684 DSSERR("VC(%d) busy when trying to configure it!\n",
1685 channel);
1686
1687 r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
1688 r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
1689 r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
1690 r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
1691 r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
1692 r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
1693 r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
1694
1695 r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
1696 r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
1697
1698 dsi_write_reg(DSI_VC_CTRL(channel), r);
1699
1700 dsi.vc[channel].mode = DSI_VC_MODE_L4;
1701}
1702
1703static void dsi_vc_config_l4(int channel)
1704{
1705 if (dsi.vc[channel].mode == DSI_VC_MODE_L4)
1706 return;
1707
1708 DSSDBGF("%d", channel);
1709
1710 dsi_vc_enable(channel, 0);
1711
1712 if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
1713 DSSERR("vc(%d) busy when trying to config for L4\n", channel);
1714
1715 REG_FLD_MOD(DSI_VC_CTRL(channel), 0, 1, 1); /* SOURCE, 0 = L4 */
1716
1717 dsi_vc_enable(channel, 1);
1718
1719 dsi.vc[channel].mode = DSI_VC_MODE_L4;
1720}
1721
1722static void dsi_vc_config_vp(int channel)
1723{
1724 if (dsi.vc[channel].mode == DSI_VC_MODE_VP)
1725 return;
1726
1727 DSSDBGF("%d", channel);
1728
1729 dsi_vc_enable(channel, 0);
1730
1731 if (REG_GET(DSI_VC_CTRL(channel), 15, 15)) /* VC_BUSY */
1732 DSSERR("vc(%d) busy when trying to config for VP\n", channel);
1733
1734 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 1, 1); /* SOURCE, 1 = video port */
1735
1736 dsi_vc_enable(channel, 1);
1737
1738 dsi.vc[channel].mode = DSI_VC_MODE_VP;
1739}
1740
1741
1742static void dsi_vc_enable_hs(int channel, bool enable)
1743{
1744 DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
1745
1746 dsi_vc_enable(channel, 0);
1747 dsi_if_enable(0);
1748
1749 REG_FLD_MOD(DSI_VC_CTRL(channel), enable, 9, 9);
1750
1751 dsi_vc_enable(channel, 1);
1752 dsi_if_enable(1);
1753
1754 dsi_force_tx_stop_mode_io();
1755}
1756
1757static void dsi_vc_flush_long_data(int channel)
1758{
1759 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1760 u32 val;
1761 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1762 DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
1763 (val >> 0) & 0xff,
1764 (val >> 8) & 0xff,
1765 (val >> 16) & 0xff,
1766 (val >> 24) & 0xff);
1767 }
1768}
1769
1770static void dsi_show_rx_ack_with_err(u16 err)
1771{
1772 DSSERR("\tACK with ERROR (%#x):\n", err);
1773 if (err & (1 << 0))
1774 DSSERR("\t\tSoT Error\n");
1775 if (err & (1 << 1))
1776 DSSERR("\t\tSoT Sync Error\n");
1777 if (err & (1 << 2))
1778 DSSERR("\t\tEoT Sync Error\n");
1779 if (err & (1 << 3))
1780 DSSERR("\t\tEscape Mode Entry Command Error\n");
1781 if (err & (1 << 4))
1782 DSSERR("\t\tLP Transmit Sync Error\n");
1783 if (err & (1 << 5))
1784 DSSERR("\t\tHS Receive Timeout Error\n");
1785 if (err & (1 << 6))
1786 DSSERR("\t\tFalse Control Error\n");
1787 if (err & (1 << 7))
1788 DSSERR("\t\t(reserved7)\n");
1789 if (err & (1 << 8))
1790 DSSERR("\t\tECC Error, single-bit (corrected)\n");
1791 if (err & (1 << 9))
1792 DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
1793 if (err & (1 << 10))
1794 DSSERR("\t\tChecksum Error\n");
1795 if (err & (1 << 11))
1796 DSSERR("\t\tData type not recognized\n");
1797 if (err & (1 << 12))
1798 DSSERR("\t\tInvalid VC ID\n");
1799 if (err & (1 << 13))
1800 DSSERR("\t\tInvalid Transmission Length\n");
1801 if (err & (1 << 14))
1802 DSSERR("\t\t(reserved14)\n");
1803 if (err & (1 << 15))
1804 DSSERR("\t\tDSI Protocol Violation\n");
1805}
1806
1807static u16 dsi_vc_flush_receive_data(int channel)
1808{
1809 /* RX_FIFO_NOT_EMPTY */
1810 while (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
1811 u32 val;
1812 u8 dt;
1813 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
1814 DSSDBG("\trawval %#08x\n", val);
1815 dt = FLD_GET(val, 5, 0);
1816 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
1817 u16 err = FLD_GET(val, 23, 8);
1818 dsi_show_rx_ack_with_err(err);
1819 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
1820 DSSDBG("\tDCS short response, 1 byte: %#x\n",
1821 FLD_GET(val, 23, 8));
1822 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
1823 DSSDBG("\tDCS short response, 2 byte: %#x\n",
1824 FLD_GET(val, 23, 8));
1825 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
1826 DSSDBG("\tDCS long response, len %d\n",
1827 FLD_GET(val, 23, 8));
1828 dsi_vc_flush_long_data(channel);
1829 } else {
1830 DSSERR("\tunknown datatype 0x%02x\n", dt);
1831 }
1832 }
1833 return 0;
1834}
1835
1836static int dsi_vc_send_bta(int channel)
1837{
1838 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
1839 (dsi.debug_write || dsi.debug_read))
1840 DSSDBG("dsi_vc_send_bta %d\n", channel);
1841
1842 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1843
1844 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
1845 DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
1846 dsi_vc_flush_receive_data(channel);
1847 }
1848
1849 REG_FLD_MOD(DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
1850
1851 return 0;
1852}
1853
1854int dsi_vc_send_bta_sync(int channel)
1855{
1856 int r = 0;
1857 u32 err;
1858
1859 INIT_COMPLETION(dsi.bta_completion);
1860
1861 dsi_vc_enable_bta_irq(channel);
1862
1863 r = dsi_vc_send_bta(channel);
1864 if (r)
1865 goto err;
1866
1867 if (wait_for_completion_timeout(&dsi.bta_completion,
1868 msecs_to_jiffies(500)) == 0) {
1869 DSSERR("Failed to receive BTA\n");
1870 r = -EIO;
1871 goto err;
1872 }
1873
1874 err = dsi_get_errors();
1875 if (err) {
1876 DSSERR("Error while sending BTA: %x\n", err);
1877 r = -EIO;
1878 goto err;
1879 }
1880err:
1881 dsi_vc_disable_bta_irq(channel);
1882
1883 return r;
1884}
1885EXPORT_SYMBOL(dsi_vc_send_bta_sync);
1886
1887static inline void dsi_vc_write_long_header(int channel, u8 data_type,
1888 u16 len, u8 ecc)
1889{
1890 u32 val;
1891 u8 data_id;
1892
1893 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1894
1895 /*data_id = data_type | channel << 6; */
1896 data_id = data_type | dsi.vc[channel].dest_per << 6;
1897
1898 val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
1899 FLD_VAL(ecc, 31, 24);
1900
1901 dsi_write_reg(DSI_VC_LONG_PACKET_HEADER(channel), val);
1902}
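/* A long packet is a 4-byte header (data id in bits 7:0, 16-bit word
 * count in bits 23:8, ECC in bits 31:24) followed by the payload and a
 * 2-byte checksum; with ECC_TX_EN/CS_TX_EN set in dsi_vc_initial_config()
 * the ECC and checksum are generated by the hardware.
 */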
1903
1904static inline void dsi_vc_write_long_payload(int channel,
1905 u8 b1, u8 b2, u8 b3, u8 b4)
1906{
1907 u32 val;
1908
1909 val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;
1910
1911/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
1912 b1, b2, b3, b4, val); */
1913
1914 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
1915}
1916
1917static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
1918 u8 ecc)
1919{
1920 /*u32 val; */
1921 int i;
1922 u8 *p;
1923 int r = 0;
1924 u8 b1, b2, b3, b4;
1925
1926 if (dsi.debug_write)
1927 DSSDBG("dsi_vc_send_long, %d bytes\n", len);
1928
1929 /* len + header */
1930 if (dsi.vc[channel].fifo_size * 32 * 4 < len + 4) {
1931 DSSERR("unable to send long packet: packet too long.\n");
1932 return -EINVAL;
1933 }
1934
1935 dsi_vc_config_l4(channel);
1936
1937 dsi_vc_write_long_header(channel, data_type, len, ecc);
1938
1939 /*dsi_vc_print_status(0); */
1940
1941 p = data;
1942 for (i = 0; i < len >> 2; i++) {
1943 if (dsi.debug_write)
1944 DSSDBG("\tsending full packet %d\n", i);
1945 /*dsi_vc_print_status(0); */
1946
1947 b1 = *p++;
1948 b2 = *p++;
1949 b3 = *p++;
1950 b4 = *p++;
1951
1952 dsi_vc_write_long_payload(channel, b1, b2, b3, b4);
1953 }
1954
1955 i = len % 4;
1956 if (i) {
1957 b1 = 0; b2 = 0; b3 = 0;
1958
1959 if (dsi.debug_write)
1960 DSSDBG("\tsending remainder bytes %d\n", i);
1961
1962 switch (i) {
1963 case 3:
1964 b1 = *p++;
1965 b2 = *p++;
1966 b3 = *p++;
1967 break;
1968 case 2:
1969 b1 = *p++;
1970 b2 = *p++;
1971 break;
1972 case 1:
1973 b1 = *p++;
1974 break;
1975 }
1976
1977 dsi_vc_write_long_payload(channel, b1, b2, b3, 0);
1978 }
1979
1980 return r;
1981}
1982
1983static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
1984{
1985 u32 r;
1986 u8 data_id;
1987
1988 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
1989
1990 if (dsi.debug_write)
1991 DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
1992 channel,
1993 data_type, data & 0xff, (data >> 8) & 0xff);
1994
1995 dsi_vc_config_l4(channel);
1996
1997 if (FLD_GET(dsi_read_reg(DSI_VC_CTRL(channel)), 16, 16)) {
1998 DSSERR("TX FIFO full, aborting transfer\n");
1999 return -EINVAL;
2000 }
2001
2002 data_id = data_type | channel << 6;
2003
2004 r = (data_id << 0) | (data << 8) | (ecc << 24);
2005
2006 dsi_write_reg(DSI_VC_SHORT_PACKET_HEADER(channel), r);
2007
2008 return 0;
2009}
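/* A short packet is just the header: data id in bits 7:0, the 16-bit
 * parameter in bits 23:8 and ECC in bits 31:24, mirroring the long
 * packet header layout above.
 */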
2010
2011int dsi_vc_send_null(int channel)
2012{
2013 u8 nullpkg[] = {0, 0, 0, 0};
2014 return dsi_vc_send_long(channel, DSI_DT_NULL_PACKET, nullpkg, 4, 0);
2015}
2016EXPORT_SYMBOL(dsi_vc_send_null);
2017
2018int dsi_vc_dcs_write_nosync(int channel, u8 *data, int len)
2019{
2020 int r;
2021
2022 BUG_ON(len == 0);
2023
2024 if (len == 1) {
2025 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_0,
2026 data[0], 0);
2027 } else if (len == 2) {
2028 r = dsi_vc_send_short(channel, DSI_DT_DCS_SHORT_WRITE_1,
2029 data[0] | (data[1] << 8), 0);
2030 } else {
2031 /* 0x39 = DCS Long Write */
2032 r = dsi_vc_send_long(channel, DSI_DT_DCS_LONG_WRITE,
2033 data, len, 0);
2034 }
2035
2036 return r;
2037}
2038EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
2039
2040int dsi_vc_dcs_write(int channel, u8 *data, int len)
2041{
2042 int r;
2043
2044 r = dsi_vc_dcs_write_nosync(channel, data, len);
2045 if (r)
2046 return r;
2047
2048 r = dsi_vc_send_bta_sync(channel);
2049
2050 return r;
2051}
2052EXPORT_SYMBOL(dsi_vc_dcs_write);
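/* Illustrative call sequence from a panel driver (a sketch; the virtual
 * channel number and the DCS opcode 0x29, set_display_on, are
 * assumptions, not defined in this file):
 *
 *	u8 cmd = 0x29;
 *	int r;
 *
 *	dsi_bus_lock();
 *	r = dsi_vc_dcs_write(1, &cmd, 1);
 *	dsi_bus_unlock();
 */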
2053
2054int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
2055{
2056 u32 val;
2057 u8 dt;
2058 int r;
2059
2060 if (dsi.debug_read)
2061 DSSDBG("dsi_vc_dcs_read(ch%d, dcs_cmd %u)\n", channel, dcs_cmd);
2062
2063 r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
2064 if (r)
2065 return r;
2066
2067 r = dsi_vc_send_bta_sync(channel);
2068 if (r)
2069 return r;
2070
2071 /* RX_FIFO_NOT_EMPTY */
2072 if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
2073 DSSERR("RX fifo empty when trying to read.\n");
2074 return -EIO;
2075 }
2076
2077 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2078 if (dsi.debug_read)
2079 DSSDBG("\theader: %08x\n", val);
2080 dt = FLD_GET(val, 5, 0);
2081 if (dt == DSI_DT_RX_ACK_WITH_ERR) {
2082 u16 err = FLD_GET(val, 23, 8);
2083 dsi_show_rx_ack_with_err(err);
2084 return -EIO;
2085
2086 } else if (dt == DSI_DT_RX_SHORT_READ_1) {
2087 u8 data = FLD_GET(val, 15, 8);
2088 if (dsi.debug_read)
2089 DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
2090
2091 if (buflen < 1)
2092 return -EIO;
2093
2094 buf[0] = data;
2095
2096 return 1;
2097 } else if (dt == DSI_DT_RX_SHORT_READ_2) {
2098 u16 data = FLD_GET(val, 23, 8);
2099 if (dsi.debug_read)
2100 DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
2101
2102 if (buflen < 2)
2103 return -EIO;
2104
2105 buf[0] = data & 0xff;
2106 buf[1] = (data >> 8) & 0xff;
2107
2108 return 2;
2109 } else if (dt == DSI_DT_RX_DCS_LONG_READ) {
2110 int w;
2111 int len = FLD_GET(val, 23, 8);
2112 if (dsi.debug_read)
2113 DSSDBG("\tDCS long response, len %d\n", len);
2114
2115 if (len > buflen)
2116 return -EIO;
2117
2118 /* two byte checksum ends the packet, not included in len */
2119 for (w = 0; w < len + 2;) {
2120 int b;
2121 val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
2122 if (dsi.debug_read)
2123 DSSDBG("\t\t%02x %02x %02x %02x\n",
2124 (val >> 0) & 0xff,
2125 (val >> 8) & 0xff,
2126 (val >> 16) & 0xff,
2127 (val >> 24) & 0xff);
2128
2129 for (b = 0; b < 4; ++b) {
2130 if (w < len)
2131 buf[w] = (val >> (b * 8)) & 0xff;
2132 /* we discard the 2 byte checksum */
2133 ++w;
2134 }
2135 }
2136
2137 return len;
2138
2139 } else {
2140 DSSERR("\tunknown datatype 0x%02x\n", dt);
2141 return -EIO;
2142 }
2143}
2144EXPORT_SYMBOL(dsi_vc_dcs_read);
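/* Illustrative read (a sketch; 0x0a, DCS get_power_mode, is an
 * assumption): a one-byte register read returns 1 on success with the
 * byte in buf[0]:
 *
 *	u8 mode;
 *	int r = dsi_vc_dcs_read(1, 0x0a, &mode, 1);
 */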
2145
2146
2147int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
2148{
2149 int r;
2150 r = dsi_vc_send_short(channel, DSI_DT_SET_MAX_RET_PKG_SIZE,
2151 len, 0);
2152
2153 if (r)
2154 return r;
2155
2156 r = dsi_vc_send_bta_sync(channel);
2157
2158 return r;
2159}
2160EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
2161
2162static void dsi_set_lp_rx_timeout(unsigned long ns)
2163{
2164 u32 r;
2165 unsigned x4, x16;
2166 unsigned long fck;
2167 unsigned long ticks;
2168
2169 /* ticks in DSI_FCK */
2170
2171 fck = dsi_fclk_rate();
2172 ticks = (fck / 1000 / 1000) * ns / 1000;
2173 x4 = 0;
2174 x16 = 0;
2175
2176 if (ticks > 0x1fff) {
2177 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2178 x4 = 1;
2179 x16 = 0;
2180 }
2181
2182 if (ticks > 0x1fff) {
2183 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2184 x4 = 0;
2185 x16 = 1;
2186 }
2187
2188 if (ticks > 0x1fff) {
2189 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2190 x4 = 1;
2191 x16 = 1;
2192 }
2193
2194 if (ticks > 0x1fff) {
2195 DSSWARN("LP_TX_TO over limit, setting it to max\n");
2196 ticks = 0x1fff;
2197 x4 = 1;
2198 x16 = 1;
2199 }
2200
2201 r = dsi_read_reg(DSI_TIMING2);
2202 r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
2203 r = FLD_MOD(r, x16, 14, 14); /* LP_RX_TO_X16 */
2204 r = FLD_MOD(r, x4, 13, 13); /* LP_RX_TO_X4 */
2205 r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
2206 dsi_write_reg(DSI_TIMING2, r);
2207
2208 DSSDBG("LP_RX_TO %lu ns (%#lx ticks%s%s)\n",
2209 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2210 (fck / 1000 / 1000),
2211 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2212}
2213
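dsi_set_lp_rx_timeout() above and the three timeout helpers that follow it share the same pattern: convert the requested nanoseconds into functional-clock ticks, and if the result overflows the 13-bit counter field, retry with the x4 (or x8), x16 and combined prescalers before clamping at 0x1fff. As a worked example, with fck = 173 MHz and ns = 48000, ticks = 173 * 48000 / 1000 = 8304, which exceeds 0x1fff (8191), so the x4 prescaler is selected and the counter is programmed with 8304 / 4 = 2076. A sketch of the shared calculation, factored into a hypothetical helper that is not part of the patch:

/* Hypothetical helper, not in the patch: pick prescalers so the
 * tick count fits a 13-bit counter. pre1 is 4 or 8 depending on
 * which timeout field is being programmed. */
static unsigned long calc_to_ticks(unsigned long fck, unsigned long ns,
		unsigned pre1, unsigned *use1, unsigned *use16)
{
	unsigned long mhz = fck / 1000 / 1000;
	unsigned long ticks = mhz * ns / 1000;

	*use1 = *use16 = 0;

	if (ticks > 0x1fff) {
		ticks = mhz * ns / 1000 / pre1;
		*use1 = 1;
	}
	if (ticks > 0x1fff) {
		ticks = mhz * ns / 1000 / 16;
		*use1 = 0;
		*use16 = 1;
	}
	if (ticks > 0x1fff) {
		ticks = mhz * ns / 1000 / (pre1 * 16);
		*use1 = 1;
	}
	if (ticks > 0x1fff) {
		ticks = 0x1fff;		/* clamp to the counter maximum */
		*use1 = 1;
		*use16 = 1;
	}
	return ticks;
}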
2214static void dsi_set_ta_timeout(unsigned long ns)
2215{
2216 u32 r;
2217 unsigned x8, x16;
2218 unsigned long fck;
2219 unsigned long ticks;
2220
2221 /* ticks in DSI_FCK */
2222 fck = dsi_fclk_rate();
2223 ticks = (fck / 1000 / 1000) * ns / 1000;
2224 x8 = 0;
2225 x16 = 0;
2226
2227 if (ticks > 0x1fff) {
2228 ticks = (fck / 1000 / 1000) * ns / 1000 / 8;
2229 x8 = 1;
2230 x16 = 0;
2231 }
2232
2233 if (ticks > 0x1fff) {
2234 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2235 x8 = 0;
2236 x16 = 1;
2237 }
2238
2239 if (ticks > 0x1fff) {
2240 ticks = (fck / 1000 / 1000) * ns / 1000 / (8 * 16);
2241 x8 = 1;
2242 x16 = 1;
2243 }
2244
2245 if (ticks > 0x1fff) {
2246 DSSWARN("TA_TO over limit, setting it to max\n");
2247 ticks = 0x1fff;
2248 x8 = 1;
2249 x16 = 1;
2250 }
2251
2252 r = dsi_read_reg(DSI_TIMING1);
2253 r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
2254 r = FLD_MOD(r, x16, 30, 30); /* TA_TO_X16 */
2255 r = FLD_MOD(r, x8, 29, 29); /* TA_TO_X8 */
2256 r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
2257 dsi_write_reg(DSI_TIMING1, r);
2258
2259 DSSDBG("TA_TO %lu ns (%#lx ticks%s%s)\n",
2260 (ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1) * 1000) /
2261 (fck / 1000 / 1000),
2262 ticks, x8 ? " x8" : "", x16 ? " x16" : "");
2263}
2264
2265static void dsi_set_stop_state_counter(unsigned long ns)
2266{
2267 u32 r;
2268 unsigned x4, x16;
2269 unsigned long fck;
2270 unsigned long ticks;
2271
2272 /* ticks in DSI_FCK */
2273
2274 fck = dsi_fclk_rate();
2275 ticks = (fck / 1000 / 1000) * ns / 1000;
2276 x4 = 0;
2277 x16 = 0;
2278
2279 if (ticks > 0x1fff) {
2280 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2281 x4 = 1;
2282 x16 = 0;
2283 }
2284
2285 if (ticks > 0x1fff) {
2286 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2287 x4 = 0;
2288 x16 = 1;
2289 }
2290
2291 if (ticks > 0x1fff) {
2292 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2293 x4 = 1;
2294 x16 = 1;
2295 }
2296
2297 if (ticks > 0x1fff) {
2298 DSSWARN("STOP_STATE_COUNTER_IO over limit, "
2299 "setting it to max\n");
2300 ticks = 0x1fff;
2301 x4 = 1;
2302 x16 = 1;
2303 }
2304
2305 r = dsi_read_reg(DSI_TIMING1);
2306 r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
2307 r = FLD_MOD(r, x16, 14, 14); /* STOP_STATE_X16_IO */
2308 r = FLD_MOD(r, x4, 13, 13); /* STOP_STATE_X4_IO */
2309 r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
2310 dsi_write_reg(DSI_TIMING1, r);
2311
2312 DSSDBG("STOP_STATE_COUNTER %lu ns (%#lx ticks%s%s)\n",
2313 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2314 (fck / 1000 / 1000),
2315 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2316}
2317
2318static void dsi_set_hs_tx_timeout(unsigned long ns)
2319{
2320 u32 r;
2321 unsigned x4, x16;
2322 unsigned long fck;
2323 unsigned long ticks;
2324
2325 /* ticks in TxByteClkHS */
2326
2327 fck = dsi_get_txbyteclkhs();
2328 ticks = (fck / 1000 / 1000) * ns / 1000;
2329 x4 = 0;
2330 x16 = 0;
2331
2332 if (ticks > 0x1fff) {
2333 ticks = (fck / 1000 / 1000) * ns / 1000 / 4;
2334 x4 = 1;
2335 x16 = 0;
2336 }
2337
2338 if (ticks > 0x1fff) {
2339 ticks = (fck / 1000 / 1000) * ns / 1000 / 16;
2340 x4 = 0;
2341 x16 = 1;
2342 }
2343
2344 if (ticks > 0x1fff) {
2345 ticks = (fck / 1000 / 1000) * ns / 1000 / (4 * 16);
2346 x4 = 1;
2347 x16 = 1;
2348 }
2349
2350 if (ticks > 0x1fff) {
2351 DSSWARN("HS_TX_TO over limit, setting it to max\n");
2352 ticks = 0x1fff;
2353 x4 = 1;
2354 x16 = 1;
2355 }
2356
2357 r = dsi_read_reg(DSI_TIMING2);
2358 r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
2359 r = FLD_MOD(r, x16, 30, 30); /* HS_TX_TO_X16 */
2360	r = FLD_MOD(r, x4, 29, 29);	/* HS_TX_TO_X8 (acts as x4) */
2361 r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
2362 dsi_write_reg(DSI_TIMING2, r);
2363
2364 DSSDBG("HS_TX_TO %lu ns (%#lx ticks%s%s)\n",
2365 (ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1) * 1000) /
2366 (fck / 1000 / 1000),
2367 ticks, x4 ? " x4" : "", x16 ? " x16" : "");
2368}
2369static int dsi_proto_config(struct omap_dss_device *dssdev)
2370{
2371 u32 r;
2372 int buswidth = 0;
2373
2374 dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
2375 DSI_FIFO_SIZE_0,
2376 DSI_FIFO_SIZE_0,
2377 DSI_FIFO_SIZE_0);
2378
2379 dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
2380 DSI_FIFO_SIZE_0,
2381 DSI_FIFO_SIZE_0,
2382 DSI_FIFO_SIZE_0);
2383
2384 /* XXX what values for the timeouts? */
2385 dsi_set_stop_state_counter(1000);
2386 dsi_set_ta_timeout(6400000);
2387 dsi_set_lp_rx_timeout(48000);
2388 dsi_set_hs_tx_timeout(1000000);
2389
2390 switch (dssdev->ctrl.pixel_size) {
2391 case 16:
2392 buswidth = 0;
2393 break;
2394 case 18:
2395 buswidth = 1;
2396 break;
2397 case 24:
2398 buswidth = 2;
2399 break;
2400 default:
2401 BUG();
2402 }
2403
2404 r = dsi_read_reg(DSI_CTRL);
2405 r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
2406 r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
2407 r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
2408 r = FLD_MOD(r, 1, 4, 4); /* VP_CLK_RATIO, always 1, see errata*/
2409 r = FLD_MOD(r, buswidth, 7, 6); /* VP_DATA_BUS_WIDTH */
2410 r = FLD_MOD(r, 0, 8, 8); /* VP_CLK_POL */
2411 r = FLD_MOD(r, 2, 13, 12); /* LINE_BUFFER, 2 lines */
2412 r = FLD_MOD(r, 1, 14, 14); /* TRIGGER_RESET_MODE */
2413 r = FLD_MOD(r, 1, 19, 19); /* EOT_ENABLE */
2414 r = FLD_MOD(r, 1, 24, 24); /* DCS_CMD_ENABLE */
2415 r = FLD_MOD(r, 0, 25, 25); /* DCS_CMD_CODE, 1=start, 0=continue */
2416
2417 dsi_write_reg(DSI_CTRL, r);
2418
2419 dsi_vc_initial_config(0);
2420
2421 /* set all vc targets to peripheral 0 */
2422 dsi.vc[0].dest_per = 0;
2423 dsi.vc[1].dest_per = 0;
2424 dsi.vc[2].dest_per = 0;
2425 dsi.vc[3].dest_per = 0;
2426
2427 return 0;
2428}
2429
2430static void dsi_proto_timings(struct omap_dss_device *dssdev)
2431{
2432 unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
2433 unsigned tclk_pre, tclk_post;
2434 unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
2435 unsigned ths_trail, ths_exit;
2436 unsigned ddr_clk_pre, ddr_clk_post;
2437 unsigned enter_hs_mode_lat, exit_hs_mode_lat;
2438 unsigned ths_eot;
2439 u32 r;
2440
2441 r = dsi_read_reg(DSI_DSIPHY_CFG0);
2442 ths_prepare = FLD_GET(r, 31, 24);
2443 ths_prepare_ths_zero = FLD_GET(r, 23, 16);
2444 ths_zero = ths_prepare_ths_zero - ths_prepare;
2445 ths_trail = FLD_GET(r, 15, 8);
2446 ths_exit = FLD_GET(r, 7, 0);
2447
2448 r = dsi_read_reg(DSI_DSIPHY_CFG1);
2449 tlpx = FLD_GET(r, 22, 16) * 2;
2450 tclk_trail = FLD_GET(r, 15, 8);
2451 tclk_zero = FLD_GET(r, 7, 0);
2452
2453 r = dsi_read_reg(DSI_DSIPHY_CFG2);
2454 tclk_prepare = FLD_GET(r, 7, 0);
2455
2456 /* min 8*UI */
2457 tclk_pre = 20;
2458 /* min 60ns + 52*UI */
2459 tclk_post = ns2ddr(60) + 26;
2460
2461 /* ths_eot is 2 for 2 datalanes and 4 for 1 datalane */
2462 if (dssdev->phy.dsi.data1_lane != 0 &&
2463 dssdev->phy.dsi.data2_lane != 0)
2464 ths_eot = 2;
2465 else
2466 ths_eot = 4;
2467
2468 ddr_clk_pre = DIV_ROUND_UP(tclk_pre + tlpx + tclk_zero + tclk_prepare,
2469 4);
2470 ddr_clk_post = DIV_ROUND_UP(tclk_post + ths_trail, 4) + ths_eot;
2471
2472 BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
2473 BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
2474
2475 r = dsi_read_reg(DSI_CLK_TIMING);
2476 r = FLD_MOD(r, ddr_clk_pre, 15, 8);
2477 r = FLD_MOD(r, ddr_clk_post, 7, 0);
2478 dsi_write_reg(DSI_CLK_TIMING, r);
2479
2480 DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
2481 ddr_clk_pre,
2482 ddr_clk_post);
2483
2484 enter_hs_mode_lat = 1 + DIV_ROUND_UP(tlpx, 4) +
2485 DIV_ROUND_UP(ths_prepare, 4) +
2486 DIV_ROUND_UP(ths_zero + 3, 4);
2487
2488 exit_hs_mode_lat = DIV_ROUND_UP(ths_trail + ths_exit, 4) + 1 + ths_eot;
2489
2490 r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
2491 FLD_VAL(exit_hs_mode_lat, 15, 0);
2492 dsi_write_reg(DSI_VM_TIMING7, r);
2493
2494 DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
2495 enter_hs_mode_lat, exit_hs_mode_lat);
2496}
2497
2498
2499#define DSI_DECL_VARS \
2500 int __dsi_cb = 0; u32 __dsi_cv = 0;
2501
2502#define DSI_FLUSH(ch) \
2503 if (__dsi_cb > 0) { \
2504 /*DSSDBG("sending long packet %#010x\n", __dsi_cv);*/ \
2505 dsi_write_reg(DSI_VC_LONG_PACKET_PAYLOAD(ch), __dsi_cv); \
2506 __dsi_cb = __dsi_cv = 0; \
2507 }
2508
2509#define DSI_PUSH(ch, data) \
2510 do { \
2511 __dsi_cv |= (data) << (__dsi_cb * 8); \
2512 /*DSSDBG("cv = %#010x, cb = %d\n", __dsi_cv, __dsi_cb);*/ \
2513 if (++__dsi_cb > 3) \
2514 DSI_FLUSH(ch); \
2515 } while (0)
2516
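DSI_PUSH() packs bytes little-endian into a 32-bit accumulator and DSI_FLUSH() writes the accumulated word into the TX FIFO; the fourth push flushes automatically. A short illustrative trace (packing only; a real caller sends a long-packet header first):

/* Illustrative trace of the packing macros: */
{
	DSI_DECL_VARS;

	DSI_PUSH(0, 0x2c);	/* __dsi_cv = 0x0000002c, __dsi_cb = 1 */
	DSI_PUSH(0, 0xaa);	/* __dsi_cv = 0x0000aa2c, __dsi_cb = 2 */
	DSI_PUSH(0, 0xbb);	/* __dsi_cv = 0x00bbaa2c, __dsi_cb = 3 */
	DSI_PUSH(0, 0xcc);	/* 0xccbbaa2c is flushed to the FIFO */
	DSI_FLUSH(0);		/* no-op: the accumulator is empty again */
}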
2517static int dsi_update_screen_l4(struct omap_dss_device *dssdev,
2518 int x, int y, int w, int h)
2519{
2520 /* Note: supports only 24bit colors in 32bit container */
2521 int first = 1;
2522 int fifo_stalls = 0;
2523 int max_dsi_packet_size;
2524 int max_data_per_packet;
2525 int max_pixels_per_packet;
2526 int pixels_left;
2527 int bytespp = dssdev->ctrl.pixel_size / 8;
2528 int scr_width;
2529 u32 __iomem *data;
2530 int start_offset;
2531 int horiz_inc;
2532 int current_x;
2533 struct omap_overlay *ovl;
2534
2535 debug_irq = 0;
2536
2537 DSSDBG("dsi_update_screen_l4 (%d,%d %dx%d)\n",
2538 x, y, w, h);
2539
2540 ovl = dssdev->manager->overlays[0];
2541
2542 if (ovl->info.color_mode != OMAP_DSS_COLOR_RGB24U)
2543 return -EINVAL;
2544
2545 if (dssdev->ctrl.pixel_size != 24)
2546 return -EINVAL;
2547
2548 scr_width = ovl->info.screen_width;
2549 data = ovl->info.vaddr;
2550
2551 start_offset = scr_width * y + x;
2552 horiz_inc = scr_width - w;
2553 current_x = x;
2554
2555 /* We need header(4) + DCSCMD(1) + pixels(numpix*bytespp) bytes
2556 * in fifo */
2557
2558 /* When using CPU, max long packet size is TX buffer size */
2559 max_dsi_packet_size = dsi.vc[0].fifo_size * 32 * 4;
2560
2561	/* we seem to get better perf if we divide the tx fifo in half,
2562	   and fill one half while the other half is being sent:
2563	max_dsi_packet_size /= 2; */
2564
2565 max_data_per_packet = max_dsi_packet_size - 4 - 1;
2566
2567 max_pixels_per_packet = max_data_per_packet / bytespp;
2568
2569 DSSDBG("max_pixels_per_packet %d\n", max_pixels_per_packet);
2570
2571 pixels_left = w * h;
2572
2573 DSSDBG("total pixels %d\n", pixels_left);
2574
2575 data += start_offset;
2576
2577 while (pixels_left > 0) {
2578 /* 0x2c = write_memory_start */
2579 /* 0x3c = write_memory_continue */
2580 u8 dcs_cmd = first ? 0x2c : 0x3c;
2581 int pixels;
2582 DSI_DECL_VARS;
2583 first = 0;
2584
2585#if 1
2586 /* using fifo not empty */
2587 /* TX_FIFO_NOT_EMPTY */
2588 while (FLD_GET(dsi_read_reg(DSI_VC_CTRL(0)), 5, 5)) {
2589 udelay(1);
2590 fifo_stalls++;
2591 if (fifo_stalls > 0xfffff) {
2592 DSSERR("fifo stalls overflow, pixels left %d\n",
2593 pixels_left);
2594 dsi_if_enable(0);
2595 return -EIO;
2596 }
2597 }
2598#elif 1
2599 /* using fifo emptiness */
2600 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 <
2601 max_dsi_packet_size) {
2602 fifo_stalls++;
2603 if (fifo_stalls > 0xfffff) {
2604 DSSERR("fifo stalls overflow, pixels left %d\n",
2605 pixels_left);
2606 dsi_if_enable(0);
2607 return -EIO;
2608 }
2609 }
2610#else
2611 while ((REG_GET(DSI_TX_FIFO_VC_EMPTINESS, 7, 0)+1)*4 == 0) {
2612 fifo_stalls++;
2613 if (fifo_stalls > 0xfffff) {
2614 DSSERR("fifo stalls overflow, pixels left %d\n",
2615 pixels_left);
2616 dsi_if_enable(0);
2617 return -EIO;
2618 }
2619 }
2620#endif
2621 pixels = min(max_pixels_per_packet, pixels_left);
2622
2623 pixels_left -= pixels;
2624
2625 dsi_vc_write_long_header(0, DSI_DT_DCS_LONG_WRITE,
2626 1 + pixels * bytespp, 0);
2627
2628 DSI_PUSH(0, dcs_cmd);
2629
2630 while (pixels-- > 0) {
2631 u32 pix = __raw_readl(data++);
2632
2633 DSI_PUSH(0, (pix >> 16) & 0xff);
2634 DSI_PUSH(0, (pix >> 8) & 0xff);
2635 DSI_PUSH(0, (pix >> 0) & 0xff);
2636
2637 current_x++;
2638 if (current_x == x+w) {
2639 current_x = x;
2640 data += horiz_inc;
2641 }
2642 }
2643
2644 DSI_FLUSH(0);
2645 }
2646
2647 return 0;
2648}
2649
2650static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
2651 u16 x, u16 y, u16 w, u16 h)
2652{
2653 unsigned bytespp;
2654 unsigned bytespl;
2655 unsigned bytespf;
2656 unsigned total_len;
2657 unsigned packet_payload;
2658 unsigned packet_len;
2659 u32 l;
2660 bool use_te_trigger;
2661 const unsigned channel = 0;
2662 /* line buffer is 1024 x 24bits */
2663 /* XXX: for some reason using full buffer size causes considerable TX
2664 * slowdown with update sizes that fill the whole buffer */
2665 const unsigned line_buf_size = 1023 * 3;
2666
2667 use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
2668
2669 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
2670 DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
2671 x, y, w, h);
2672
2673 bytespp = dssdev->ctrl.pixel_size / 8;
2674 bytespl = w * bytespp;
2675 bytespf = bytespl * h;
2676
2677	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
2678	 * the number of lines in a packet. See the errata about VP_CLK_RATIO */
2679
2680 if (bytespf < line_buf_size)
2681 packet_payload = bytespf;
2682 else
2683 packet_payload = (line_buf_size) / bytespl * bytespl;
2684
2685 packet_len = packet_payload + 1; /* 1 byte for DCS cmd */
2686 total_len = (bytespf / packet_payload) * packet_len;
2687
2688 if (bytespf % packet_payload)
2689 total_len += (bytespf % packet_payload) + 1;
2690
2691 if (0)
2692 dsi_vc_print_status(1);
2693
2694 l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
2695 dsi_write_reg(DSI_VC_TE(channel), l);
2696
2697 dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
2698
2699 if (use_te_trigger)
2700 l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
2701 else
2702 l = FLD_MOD(l, 1, 31, 31); /* TE_START */
2703 dsi_write_reg(DSI_VC_TE(channel), l);
2704
2705	/* We put SIDLEMODE to no-idle for the duration of the transfer,
2706	 * because DSS interrupts are not capable of waking up the CPU and the
2707	 * framedone interrupt could be delayed for quite a long time. The
2708	 * same probably goes for the other DSS interrupts, but so far the
2709	 * problem has only been seen here.
2710	 */
2711 dispc_disable_sidle();
2712
2713 dss_start_update(dssdev);
2714
2715 if (use_te_trigger) {
2716 /* disable LP_RX_TO, so that we can receive TE. Time to wait
2717 * for TE is longer than the timer allows */
2718 REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
2719
2720 dsi_vc_send_bta(channel);
2721
2722#ifdef DSI_CATCH_MISSING_TE
2723 mod_timer(&dsi.te_timer, jiffies + msecs_to_jiffies(250));
2724#endif
2725 }
2726}
2727
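A worked example of the packet arithmetic above, under assumed values: for a 24 bpp update 800 pixels wide and 480 lines tall, bytespl = 2400 and bytespf = 1152000; since bytespf exceeds line_buf_size (3069), packet_payload = 3069 / 2400 * 2400 = 2400, i.e. exactly one line per packet, packet_len = 2401, and total_len = (1152000 / 2400) * 2401 = 480 * 2401 bytes with no partial packet left over.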
2728#ifdef DSI_CATCH_MISSING_TE
2729static void dsi_te_timeout(unsigned long arg)
2730{
2731 DSSERR("TE not received for 250ms!\n");
2732}
2733#endif
2734
2735static void dsi_framedone_irq_callback(void *data, u32 mask)
2736{
2737 /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
2738 * turns itself off. However, DSI still has the pixels in its buffers,
2739 * and is sending the data.
2740 */
2741
2742 /* SIDLEMODE back to smart-idle */
2743 dispc_enable_sidle();
2744
2745 dsi.framedone_received = true;
2746 wake_up(&dsi.waitqueue);
2747}
2748
2749static void dsi_set_update_region(struct omap_dss_device *dssdev,
2750 u16 x, u16 y, u16 w, u16 h)
2751{
2752 spin_lock(&dsi.update_lock);
2753 if (dsi.update_region.dirty) {
2754 dsi.update_region.x = min(x, dsi.update_region.x);
2755 dsi.update_region.y = min(y, dsi.update_region.y);
2756 dsi.update_region.w = max(w, dsi.update_region.w);
2757 dsi.update_region.h = max(h, dsi.update_region.h);
2758 } else {
2759 dsi.update_region.x = x;
2760 dsi.update_region.y = y;
2761 dsi.update_region.w = w;
2762 dsi.update_region.h = h;
2763 }
2764
2765 dsi.update_region.device = dssdev;
2766 dsi.update_region.dirty = true;
2767
2768 spin_unlock(&dsi.update_lock);
2769
2770}
2771
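Note that the dirty-merge branch in dsi_set_update_region() keeps min(x), min(y) and max(w), max(h) rather than computing a bounding box, so two disjoint regions can merge into one that does not cover both (x=0 w=10 merged with x=100 w=10 still yields w=10). That may be acceptable for this driver's callers; for comparison, a hypothetical bounding-box merge would be:

/* Hypothetical bounding-box merge, for comparison only: */
int x2 = max(dsi.update_region.x + dsi.update_region.w, x + w);
int y2 = max(dsi.update_region.y + dsi.update_region.h, y + h);

dsi.update_region.x = min(x, dsi.update_region.x);
dsi.update_region.y = min(y, dsi.update_region.y);
dsi.update_region.w = x2 - dsi.update_region.x;
dsi.update_region.h = y2 - dsi.update_region.y;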
2772static int dsi_set_update_mode(struct omap_dss_device *dssdev,
2773 enum omap_dss_update_mode mode)
2774{
2775 int r = 0;
2776 int i;
2777
2778 WARN_ON(!mutex_is_locked(&dsi.bus_lock));
2779
2780 if (dsi.update_mode != mode) {
2781 dsi.update_mode = mode;
2782
2783 /* Mark the overlays dirty, and do apply(), so that we get the
2784		 * overlays configured properly after the update mode change. */
2785 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
2786 struct omap_overlay *ovl;
2787 ovl = omap_dss_get_overlay(i);
2788 if (ovl->manager == dssdev->manager)
2789 ovl->info_dirty = true;
2790 }
2791
2792 r = dssdev->manager->apply(dssdev->manager);
2793
2794 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
2795 mode == OMAP_DSS_UPDATE_AUTO) {
2796 u16 w, h;
2797
2798 DSSDBG("starting auto update\n");
2799
2800 dssdev->get_resolution(dssdev, &w, &h);
2801
2802 dsi_set_update_region(dssdev, 0, 0, w, h);
2803
2804 dsi_perf_mark_start_auto();
2805
2806 wake_up(&dsi.waitqueue);
2807 }
2808 }
2809
2810 return r;
2811}
2812
2813static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
2814{
2815 int r;
2816 r = dssdev->driver->enable_te(dssdev, enable);
2817	/* XXX for some reason, DSI TE breaks if we don't wait here.
2818	 * Panel bug? Needs more study */
2819 msleep(100);
2820 return r;
2821}
2822
2823static void dsi_handle_framedone(void)
2824{
2825 int r;
2826 const int channel = 0;
2827 bool use_te_trigger;
2828
2829 use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
2830
2831 if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
2832 DSSDBG("FRAMEDONE\n");
2833
2834 if (use_te_trigger) {
2835 /* enable LP_RX_TO again after the TE */
2836 REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
2837 }
2838
2839 /* Send BTA after the frame. We need this for the TE to work, as TE
2840 * trigger is only sent for BTAs without preceding packet. Thus we need
2841 * to BTA after the pixel packets so that next BTA will cause TE
2842 * trigger.
2843 *
2844 * This is not needed when TE is not in use, but we do it anyway to
2845 * make sure that the transfer has been completed. It would be more
2846 * optimal, but more complex, to wait only just before starting next
2847 * transfer. */
2848 r = dsi_vc_send_bta_sync(channel);
2849 if (r)
2850 DSSERR("BTA after framedone failed\n");
2851
2852 /* RX_FIFO_NOT_EMPTY */
2853 if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
2854 DSSERR("Received error during frame transfer:\n");
2855 dsi_vc_flush_receive_data(0);
2856 }
2857
2858#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
2859 dispc_fake_vsync_irq();
2860#endif
2861}
2862
2863static int dsi_update_thread(void *data)
2864{
2865 unsigned long timeout;
2866 struct omap_dss_device *device;
2867 u16 x, y, w, h;
2868
2869 while (1) {
2870 bool sched;
2871
2872 wait_event_interruptible(dsi.waitqueue,
2873 dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
2874 (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
2875 dsi.update_region.dirty == true) ||
2876 kthread_should_stop());
2877
2878 if (kthread_should_stop())
2879 break;
2880
2881 dsi_bus_lock();
2882
2883 if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
2884 kthread_should_stop()) {
2885 dsi_bus_unlock();
2886 break;
2887 }
2888
2889 dsi_perf_mark_setup();
2890
2891 if (dsi.update_region.dirty) {
2892 spin_lock(&dsi.update_lock);
2893 dsi.active_update_region = dsi.update_region;
2894 dsi.update_region.dirty = false;
2895 spin_unlock(&dsi.update_lock);
2896 }
2897
2898 device = dsi.active_update_region.device;
2899 x = dsi.active_update_region.x;
2900 y = dsi.active_update_region.y;
2901 w = dsi.active_update_region.w;
2902 h = dsi.active_update_region.h;
2903
2904 if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
2905
2906 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
2907 dss_setup_partial_planes(device,
2908 &x, &y, &w, &h);
2909
2910 dispc_set_lcd_size(w, h);
2911 }
2912
2913 if (dsi.active_update_region.dirty) {
2914 dsi.active_update_region.dirty = false;
2915 /* XXX TODO we don't need to send the coords, if they
2916 * are the same that are already programmed to the
2917 * panel. That should speed up manual update a bit */
2918 device->driver->setup_update(device, x, y, w, h);
2919 }
2920
2921 dsi_perf_mark_start();
2922
2923 if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
2924 dsi_vc_config_vp(0);
2925
2926 if (dsi.te_enabled && dsi.use_ext_te)
2927 device->driver->wait_for_te(device);
2928
2929 dsi.framedone_received = false;
2930
2931 dsi_update_screen_dispc(device, x, y, w, h);
2932
2933 /* wait for framedone */
2934 timeout = msecs_to_jiffies(1000);
2935 wait_event_timeout(dsi.waitqueue,
2936 dsi.framedone_received == true,
2937 timeout);
2938
2939 if (!dsi.framedone_received) {
2940 DSSERR("framedone timeout\n");
2941 DSSERR("failed update %d,%d %dx%d\n",
2942 x, y, w, h);
2943
2944 dispc_enable_sidle();
2945 dispc_enable_lcd_out(0);
2946
2947 dsi_reset_tx_fifo(0);
2948 } else {
2949 dsi_handle_framedone();
2950 dsi_perf_show("DISPC");
2951 }
2952 } else {
2953 dsi_update_screen_l4(device, x, y, w, h);
2954 dsi_perf_show("L4");
2955 }
2956
2957 sched = atomic_read(&dsi.bus_lock.count) < 0;
2958
2959 complete_all(&dsi.update_completion);
2960
2961 dsi_bus_unlock();
2962
2963		/* XXX We need to give others a chance to get the bus lock. Is
2964		 * there a better way to do this? */
2965 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
2966 schedule_timeout_interruptible(1);
2967 }
2968
2969 DSSDBG("update thread exiting\n");
2970
2971 return 0;
2972}
2973
2974
2975
2976/* Display funcs */
2977
2978static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
2979{
2980 int r;
2981
2982 r = omap_dispc_register_isr(dsi_framedone_irq_callback, NULL,
2983 DISPC_IRQ_FRAMEDONE);
2984 if (r) {
2985 DSSERR("can't get FRAMEDONE irq\n");
2986 return r;
2987 }
2988
2989 dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
2990
2991 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_DSI);
2992 dispc_enable_fifohandcheck(1);
2993
2994 dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
2995
2996 {
2997 struct omap_video_timings timings = {
2998 .hsw = 1,
2999 .hfp = 1,
3000 .hbp = 1,
3001 .vsw = 1,
3002 .vfp = 0,
3003 .vbp = 0,
3004 };
3005
3006 dispc_set_lcd_timings(&timings);
3007 }
3008
3009 return 0;
3010}
3011
3012static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev)
3013{
3014 omap_dispc_unregister_isr(dsi_framedone_irq_callback, NULL,
3015 DISPC_IRQ_FRAMEDONE);
3016}
3017
3018static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
3019{
3020 struct dsi_clock_info cinfo;
3021 int r;
3022
3023 /* we always use DSS2_FCK as input clock */
3024 cinfo.use_dss2_fck = true;
3025 cinfo.regn = dssdev->phy.dsi.div.regn;
3026 cinfo.regm = dssdev->phy.dsi.div.regm;
3027 cinfo.regm3 = dssdev->phy.dsi.div.regm3;
3028 cinfo.regm4 = dssdev->phy.dsi.div.regm4;
3029 r = dsi_calc_clock_rates(&cinfo);
3030 if (r)
3031 return r;
3032
3033 r = dsi_pll_set_clock_div(&cinfo);
3034 if (r) {
3035 DSSERR("Failed to set dsi clocks\n");
3036 return r;
3037 }
3038
3039 return 0;
3040}
3041
3042static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
3043{
3044 struct dispc_clock_info dispc_cinfo;
3045 int r;
3046 unsigned long long fck;
3047
3048 fck = dsi_get_dsi1_pll_rate();
3049
3050 dispc_cinfo.lck_div = dssdev->phy.dsi.div.lck_div;
3051 dispc_cinfo.pck_div = dssdev->phy.dsi.div.pck_div;
3052
3053 r = dispc_calc_clock_rates(fck, &dispc_cinfo);
3054 if (r) {
3055 DSSERR("Failed to calc dispc clocks\n");
3056 return r;
3057 }
3058
3059 r = dispc_set_clock_div(&dispc_cinfo);
3060 if (r) {
3061 DSSERR("Failed to set dispc clocks\n");
3062 return r;
3063 }
3064
3065 return 0;
3066}
3067
3068static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
3069{
3070 int r;
3071
3072 _dsi_print_reset_status();
3073
3074 r = dsi_pll_init(dssdev, true, true);
3075 if (r)
3076 goto err0;
3077
3078 r = dsi_configure_dsi_clocks(dssdev);
3079 if (r)
3080 goto err1;
3081
3082 dss_select_clk_source(true, true);
3083
3084 DSSDBG("PLL OK\n");
3085
3086 r = dsi_configure_dispc_clocks(dssdev);
3087 if (r)
3088 goto err2;
3089
3090 r = dsi_complexio_init(dssdev);
3091 if (r)
3092 goto err2;
3093
3094 _dsi_print_reset_status();
3095
3096 dsi_proto_timings(dssdev);
3097 dsi_set_lp_clk_divisor(dssdev);
3098
3099 if (1)
3100 _dsi_print_reset_status();
3101
3102 r = dsi_proto_config(dssdev);
3103 if (r)
3104 goto err3;
3105
3106 /* enable interface */
3107 dsi_vc_enable(0, 1);
3108 dsi_if_enable(1);
3109 dsi_force_tx_stop_mode_io();
3110
3111 if (dssdev->driver->enable) {
3112 r = dssdev->driver->enable(dssdev);
3113 if (r)
3114 goto err4;
3115 }
3116
3117 /* enable high-speed after initial config */
3118 dsi_vc_enable_hs(0, 1);
3119
3120 return 0;
3121err4:
3122 dsi_if_enable(0);
3123err3:
3124 dsi_complexio_uninit();
3125err2:
3126 dss_select_clk_source(false, false);
3127err1:
3128 dsi_pll_uninit();
3129err0:
3130 return r;
3131}
3132
3133static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
3134{
3135 if (dssdev->driver->disable)
3136 dssdev->driver->disable(dssdev);
3137
3138 dss_select_clk_source(false, false);
3139 dsi_complexio_uninit();
3140 dsi_pll_uninit();
3141}
3142
3143static int dsi_core_init(void)
3144{
3145 /* Autoidle */
3146 REG_FLD_MOD(DSI_SYSCONFIG, 1, 0, 0);
3147
3148 /* ENWAKEUP */
3149 REG_FLD_MOD(DSI_SYSCONFIG, 1, 2, 2);
3150
3151 /* SIDLEMODE smart-idle */
3152 REG_FLD_MOD(DSI_SYSCONFIG, 2, 4, 3);
3153
3154 _dsi_initialize_irq();
3155
3156 return 0;
3157}
3158
3159static int dsi_display_enable(struct omap_dss_device *dssdev)
3160{
3161 int r = 0;
3162
3163 DSSDBG("dsi_display_enable\n");
3164
3165 mutex_lock(&dsi.lock);
3166 dsi_bus_lock();
3167
3168 r = omap_dss_start_device(dssdev);
3169 if (r) {
3170 DSSERR("failed to start device\n");
3171 goto err0;
3172 }
3173
3174 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
3175 DSSERR("dssdev already enabled\n");
3176 r = -EINVAL;
3177 goto err1;
3178 }
3179
3180 enable_clocks(1);
3181 dsi_enable_pll_clock(1);
3182
3183 r = _dsi_reset();
3184 if (r)
3185 goto err2;
3186
3187 dsi_core_init();
3188
3189 r = dsi_display_init_dispc(dssdev);
3190 if (r)
3191 goto err2;
3192
3193 r = dsi_display_init_dsi(dssdev);
3194 if (r)
3195 goto err3;
3196
3197 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
3198
3199 dsi.use_ext_te = dssdev->phy.dsi.ext_te;
3200 r = dsi_set_te(dssdev, dsi.te_enabled);
3201 if (r)
3202 goto err4;
3203
3204 dsi_set_update_mode(dssdev, dsi.user_update_mode);
3205
3206 dsi_bus_unlock();
3207 mutex_unlock(&dsi.lock);
3208
3209 return 0;
3210
3211err4:
3212
3213 dsi_display_uninit_dsi(dssdev);
3214err3:
3215 dsi_display_uninit_dispc(dssdev);
3216err2:
3217 enable_clocks(0);
3218 dsi_enable_pll_clock(0);
3219err1:
3220 omap_dss_stop_device(dssdev);
3221err0:
3222 dsi_bus_unlock();
3223 mutex_unlock(&dsi.lock);
3224 DSSDBG("dsi_display_enable FAILED\n");
3225 return r;
3226}
3227
3228static void dsi_display_disable(struct omap_dss_device *dssdev)
3229{
3230 DSSDBG("dsi_display_disable\n");
3231
3232 mutex_lock(&dsi.lock);
3233 dsi_bus_lock();
3234
3235 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
3236 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
3237 goto end;
3238
3239 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3240 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
3241
3242 dsi_display_uninit_dispc(dssdev);
3243
3244 dsi_display_uninit_dsi(dssdev);
3245
3246 enable_clocks(0);
3247 dsi_enable_pll_clock(0);
3248
3249 omap_dss_stop_device(dssdev);
3250end:
3251 dsi_bus_unlock();
3252 mutex_unlock(&dsi.lock);
3253}
3254
3255static int dsi_display_suspend(struct omap_dss_device *dssdev)
3256{
3257 DSSDBG("dsi_display_suspend\n");
3258
3259 mutex_lock(&dsi.lock);
3260 dsi_bus_lock();
3261
3262 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
3263 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
3264 goto end;
3265
3266 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3267 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
3268
3269 dsi_display_uninit_dispc(dssdev);
3270
3271 dsi_display_uninit_dsi(dssdev);
3272
3273 enable_clocks(0);
3274 dsi_enable_pll_clock(0);
3275end:
3276 dsi_bus_unlock();
3277 mutex_unlock(&dsi.lock);
3278
3279 return 0;
3280}
3281
3282static int dsi_display_resume(struct omap_dss_device *dssdev)
3283{
3284 int r;
3285
3286 DSSDBG("dsi_display_resume\n");
3287
3288 mutex_lock(&dsi.lock);
3289 dsi_bus_lock();
3290
3291 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
3292 DSSERR("dssdev not suspended\n");
3293 r = -EINVAL;
3294 goto err0;
3295 }
3296
3297 enable_clocks(1);
3298 dsi_enable_pll_clock(1);
3299
3300 r = _dsi_reset();
3301 if (r)
3302 goto err1;
3303
3304 dsi_core_init();
3305
3306 r = dsi_display_init_dispc(dssdev);
3307 if (r)
3308 goto err1;
3309
3310 r = dsi_display_init_dsi(dssdev);
3311 if (r)
3312 goto err2;
3313
3314 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
3315
3316 r = dsi_set_te(dssdev, dsi.te_enabled);
3317 if (r)
3318 goto err2;
3319
3320 dsi_set_update_mode(dssdev, dsi.user_update_mode);
3321
3322 dsi_bus_unlock();
3323 mutex_unlock(&dsi.lock);
3324
3325 return 0;
3326
3327err2:
3328 dsi_display_uninit_dispc(dssdev);
3329err1:
3330 enable_clocks(0);
3331 dsi_enable_pll_clock(0);
3332err0:
3333 dsi_bus_unlock();
3334 mutex_unlock(&dsi.lock);
3335 DSSDBG("dsi_display_resume FAILED\n");
3336 return r;
3337}
3338
3339static int dsi_display_update(struct omap_dss_device *dssdev,
3340 u16 x, u16 y, u16 w, u16 h)
3341{
3342 int r = 0;
3343 u16 dw, dh;
3344
3345 DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
3346
3347 mutex_lock(&dsi.lock);
3348
3349 if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
3350 goto end;
3351
3352 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3353 goto end;
3354
3355 dssdev->get_resolution(dssdev, &dw, &dh);
3356
3357 if (x > dw || y > dh)
3358 goto end;
3359
3360 if (x + w > dw)
3361 w = dw - x;
3362
3363 if (y + h > dh)
3364 h = dh - y;
3365
3366 if (w == 0 || h == 0)
3367 goto end;
3368
3369 if (w == 1) {
3370 r = -EINVAL;
3371 goto end;
3372 }
3373
3374 dsi_set_update_region(dssdev, x, y, w, h);
3375
3376 wake_up(&dsi.waitqueue);
3377
3378end:
3379 mutex_unlock(&dsi.lock);
3380
3381 return r;
3382}
3383
3384static int dsi_display_sync(struct omap_dss_device *dssdev)
3385{
3386 bool wait;
3387
3388 DSSDBG("dsi_display_sync()\n");
3389
3390 mutex_lock(&dsi.lock);
3391 dsi_bus_lock();
3392
3393 if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
3394 dsi.update_region.dirty) {
3395 INIT_COMPLETION(dsi.update_completion);
3396 wait = true;
3397 } else {
3398 wait = false;
3399 }
3400
3401 dsi_bus_unlock();
3402 mutex_unlock(&dsi.lock);
3403
3404 if (wait)
3405 wait_for_completion_interruptible(&dsi.update_completion);
3406
3407 DSSDBG("dsi_display_sync() done\n");
3408 return 0;
3409}
3410
3411static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
3412 enum omap_dss_update_mode mode)
3413{
3414 int r = 0;
3415
3416 DSSDBGF("%d", mode);
3417
3418 mutex_lock(&dsi.lock);
3419 dsi_bus_lock();
3420
3421 dsi.user_update_mode = mode;
3422 r = dsi_set_update_mode(dssdev, mode);
3423
3424 dsi_bus_unlock();
3425 mutex_unlock(&dsi.lock);
3426
3427 return r;
3428}
3429
3430static enum omap_dss_update_mode dsi_display_get_update_mode(
3431 struct omap_dss_device *dssdev)
3432{
3433 return dsi.update_mode;
3434}
3435
3436
3437static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
3438{
3439 int r = 0;
3440
3441 DSSDBGF("%d", enable);
3442
3443 if (!dssdev->driver->enable_te)
3444 return -ENOENT;
3445
3446 dsi_bus_lock();
3447
3448 dsi.te_enabled = enable;
3449
3450 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3451 goto end;
3452
3453 r = dsi_set_te(dssdev, enable);
3454end:
3455 dsi_bus_unlock();
3456
3457 return r;
3458}
3459
3460static int dsi_display_get_te(struct omap_dss_device *dssdev)
3461{
3462 return dsi.te_enabled;
3463}
3464
3465static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
3466{
3467
3468 DSSDBGF("%d", rotate);
3469
3470 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
3471 return -EINVAL;
3472
3473 dsi_bus_lock();
3474 dssdev->driver->set_rotate(dssdev, rotate);
3475 if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
3476 u16 w, h;
3477 /* the display dimensions may have changed, so set a new
3478 * update region */
3479 dssdev->get_resolution(dssdev, &w, &h);
3480 dsi_set_update_region(dssdev, 0, 0, w, h);
3481 }
3482 dsi_bus_unlock();
3483
3484 return 0;
3485}
3486
3487static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
3488{
3489 if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
3490 return 0;
3491
3492 return dssdev->driver->get_rotate(dssdev);
3493}
3494
3495static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
3496{
3497 DSSDBGF("%d", mirror);
3498
3499 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
3500 return -EINVAL;
3501
3502 dsi_bus_lock();
3503 dssdev->driver->set_mirror(dssdev, mirror);
3504 dsi_bus_unlock();
3505
3506 return 0;
3507}
3508
3509static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
3510{
3511 if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
3512 return 0;
3513
3514 return dssdev->driver->get_mirror(dssdev);
3515}
3516
3517static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
3518{
3519	int r = 0;	/* driver->run_test is optional; r must not be returned unset */
3520
3521 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3522 return -EIO;
3523
3524 DSSDBGF("%d", test_num);
3525
3526 dsi_bus_lock();
3527
3528 /* run test first in low speed mode */
3529 dsi_vc_enable_hs(0, 0);
3530
3531 if (dssdev->driver->run_test) {
3532 r = dssdev->driver->run_test(dssdev, test_num);
3533 if (r)
3534 goto end;
3535 }
3536
3537 /* then in high speed */
3538 dsi_vc_enable_hs(0, 1);
3539
3540 if (dssdev->driver->run_test) {
3541 r = dssdev->driver->run_test(dssdev, test_num);
3542 if (r)
3543 goto end;
3544 }
3545
3546end:
3547 dsi_vc_enable_hs(0, 1);
3548
3549 dsi_bus_unlock();
3550
3551 return r;
3552}
3553
3554static int dsi_display_memory_read(struct omap_dss_device *dssdev,
3555 void *buf, size_t size,
3556 u16 x, u16 y, u16 w, u16 h)
3557{
3558 int r;
3559
3560 DSSDBGF("");
3561
3562 if (!dssdev->driver->memory_read)
3563 return -EINVAL;
3564
3565 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
3566 return -EIO;
3567
3568 dsi_bus_lock();
3569
3570 r = dssdev->driver->memory_read(dssdev, buf, size,
3571 x, y, w, h);
3572
3573 /* Memory read usually changes the update area. This will
3574 * force the next update to re-set the update area */
3575 dsi.active_update_region.dirty = true;
3576
3577 dsi_bus_unlock();
3578
3579 return r;
3580}
3581
3582void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
3583 u32 fifo_size, enum omap_burst_size *burst_size,
3584 u32 *fifo_low, u32 *fifo_high)
3585{
3586 unsigned burst_size_bytes;
3587
3588 *burst_size = OMAP_DSS_BURST_16x32;
3589 burst_size_bytes = 16 * 32 / 8;
3590
3591 *fifo_high = fifo_size - burst_size_bytes;
3592 *fifo_low = fifo_size - burst_size_bytes * 8;
3593}
3594
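With the 16x32 burst chosen above, burst_size_bytes = 16 * 32 / 8 = 64. For a hypothetical 1024-byte FIFO this gives fifo_high = 960 and fifo_low = 512: the DMA request is raised when the FIFO drains to the low threshold and dropped again once it refills to within one burst of full.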
3595int dsi_init_display(struct omap_dss_device *dssdev)
3596{
3597 DSSDBG("DSI init\n");
3598
3599 dssdev->enable = dsi_display_enable;
3600 dssdev->disable = dsi_display_disable;
3601 dssdev->suspend = dsi_display_suspend;
3602 dssdev->resume = dsi_display_resume;
3603 dssdev->update = dsi_display_update;
3604 dssdev->sync = dsi_display_sync;
3605 dssdev->set_update_mode = dsi_display_set_update_mode;
3606 dssdev->get_update_mode = dsi_display_get_update_mode;
3607 dssdev->enable_te = dsi_display_enable_te;
3608 dssdev->get_te = dsi_display_get_te;
3609
3610 dssdev->get_rotate = dsi_display_get_rotate;
3611 dssdev->set_rotate = dsi_display_set_rotate;
3612
3613 dssdev->get_mirror = dsi_display_get_mirror;
3614 dssdev->set_mirror = dsi_display_set_mirror;
3615
3616 dssdev->run_test = dsi_display_run_test;
3617 dssdev->memory_read = dsi_display_memory_read;
3618
3619 /* XXX these should be figured out dynamically */
3620 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
3621 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
3622
3623 dsi.vc[0].dssdev = dssdev;
3624 dsi.vc[1].dssdev = dssdev;
3625
3626 return 0;
3627}
3628
3629int dsi_init(struct platform_device *pdev)
3630{
3631 u32 rev;
3632 int r;
3633 struct sched_param param = {
3634 .sched_priority = MAX_USER_RT_PRIO-1
3635 };
3636
3637 spin_lock_init(&dsi.errors_lock);
3638 dsi.errors = 0;
3639
3640 init_completion(&dsi.bta_completion);
3641 init_completion(&dsi.update_completion);
3642
3643 dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
3644 if (IS_ERR(dsi.thread)) {
3645 DSSERR("cannot create kthread\n");
3646 r = PTR_ERR(dsi.thread);
3647 goto err0;
3648 }
3649 sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
3650
3651 init_waitqueue_head(&dsi.waitqueue);
3652 spin_lock_init(&dsi.update_lock);
3653
3654 mutex_init(&dsi.lock);
3655 mutex_init(&dsi.bus_lock);
3656
3657#ifdef DSI_CATCH_MISSING_TE
3658 init_timer(&dsi.te_timer);
3659 dsi.te_timer.function = dsi_te_timeout;
3660 dsi.te_timer.data = 0;
3661#endif
3662
3663 dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
3664 dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
3665
3666 dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
3667 if (!dsi.base) {
3668 DSSERR("can't ioremap DSI\n");
3669 r = -ENOMEM;
3670 goto err1;
3671 }
3672
3673 dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
3674 if (IS_ERR(dsi.vdds_dsi_reg)) {
3675		/* dsi.base is unmapped in the err2 path below */
3676 DSSERR("can't get VDDS_DSI regulator\n");
3677 r = PTR_ERR(dsi.vdds_dsi_reg);
3678 goto err2;
3679 }
3680
3681 enable_clocks(1);
3682
3683 rev = dsi_read_reg(DSI_REVISION);
3684 printk(KERN_INFO "OMAP DSI rev %d.%d\n",
3685 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
3686
3687 enable_clocks(0);
3688
3689 wake_up_process(dsi.thread);
3690
3691 return 0;
3692err2:
3693 iounmap(dsi.base);
3694err1:
3695 kthread_stop(dsi.thread);
3696err0:
3697 return r;
3698}
3699
3700void dsi_exit(void)
3701{
3702 kthread_stop(dsi.thread);
3703
3704 regulator_put(dsi.vdds_dsi_reg);
3705
3706 iounmap(dsi.base);
3707
3708 DSSDBG("omap_dsi_exit\n");
3709}
3710
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
new file mode 100644
index 000000000000..9b05ee65a15d
--- /dev/null
+++ b/drivers/video/omap2/dss/dss.c
@@ -0,0 +1,596 @@
1/*
2 * linux/drivers/video/omap2/dss/dss.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "DSS"
24
25#include <linux/kernel.h>
26#include <linux/io.h>
27#include <linux/err.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
30#include <linux/seq_file.h>
31#include <linux/clk.h>
32
33#include <plat/display.h>
34#include "dss.h"
35
36#define DSS_BASE 0x48050000
37
38#define DSS_SZ_REGS SZ_512
39
40struct dss_reg {
41 u16 idx;
42};
43
44#define DSS_REG(idx) ((const struct dss_reg) { idx })
45
46#define DSS_REVISION DSS_REG(0x0000)
47#define DSS_SYSCONFIG DSS_REG(0x0010)
48#define DSS_SYSSTATUS DSS_REG(0x0014)
49#define DSS_IRQSTATUS DSS_REG(0x0018)
50#define DSS_CONTROL DSS_REG(0x0040)
51#define DSS_SDI_CONTROL DSS_REG(0x0044)
52#define DSS_PLL_CONTROL DSS_REG(0x0048)
53#define DSS_SDI_STATUS DSS_REG(0x005C)
54
55#define REG_GET(idx, start, end) \
56 FLD_GET(dss_read_reg(idx), start, end)
57
58#define REG_FLD_MOD(idx, val, start, end) \
59 dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
60
61static struct {
62 void __iomem *base;
63
64 struct clk *dpll4_m4_ck;
65
66 unsigned long cache_req_pck;
67 unsigned long cache_prate;
68 struct dss_clock_info cache_dss_cinfo;
69 struct dispc_clock_info cache_dispc_cinfo;
70
71 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
72} dss;
73
74static int _omap_dss_wait_reset(void);
75
76static inline void dss_write_reg(const struct dss_reg idx, u32 val)
77{
78 __raw_writel(val, dss.base + idx.idx);
79}
80
81static inline u32 dss_read_reg(const struct dss_reg idx)
82{
83 return __raw_readl(dss.base + idx.idx);
84}
85
86#define SR(reg) \
87 dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
88#define RR(reg) \
89 dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
90
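For reference, SR(CONTROL) expands to dss.ctx[(DSS_CONTROL).idx / sizeof(u32)] = dss_read_reg(DSS_CONTROL): a register's byte offset divided by four doubles as its slot in the context array, which is why ctx is dimensioned DSS_SZ_REGS / sizeof(u32).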
91void dss_save_context(void)
92{
93 if (cpu_is_omap24xx())
94 return;
95
96 SR(SYSCONFIG);
97 SR(CONTROL);
98
99#ifdef CONFIG_OMAP2_DSS_SDI
100 SR(SDI_CONTROL);
101 SR(PLL_CONTROL);
102#endif
103}
104
105void dss_restore_context(void)
106{
107 if (_omap_dss_wait_reset())
108 DSSERR("DSS not coming out of reset after sleep\n");
109
110 RR(SYSCONFIG);
111 RR(CONTROL);
112
113#ifdef CONFIG_OMAP2_DSS_SDI
114 RR(SDI_CONTROL);
115 RR(PLL_CONTROL);
116#endif
117}
118
119#undef SR
120#undef RR
121
122void dss_sdi_init(u8 datapairs)
123{
124 u32 l;
125
126 BUG_ON(datapairs > 3 || datapairs < 1);
127
128 l = dss_read_reg(DSS_SDI_CONTROL);
129 l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
130 l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
131 l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
132 dss_write_reg(DSS_SDI_CONTROL, l);
133
134 l = dss_read_reg(DSS_PLL_CONTROL);
135 l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
136 l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
137 l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
138 dss_write_reg(DSS_PLL_CONTROL, l);
139}
140
141int dss_sdi_enable(void)
142{
143 unsigned long timeout;
144
145 dispc_pck_free_enable(1);
146
147 /* Reset SDI PLL */
148 REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
149 udelay(1); /* wait 2x PCLK */
150
151 /* Lock SDI PLL */
152 REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
153
154 /* Waiting for PLL lock request to complete */
155 timeout = jiffies + msecs_to_jiffies(500);
156 while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
157 if (time_after_eq(jiffies, timeout)) {
158 DSSERR("PLL lock request timed out\n");
159 goto err1;
160 }
161 }
162
163 /* Clearing PLL_GO bit */
164 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
165
166 /* Waiting for PLL to lock */
167 timeout = jiffies + msecs_to_jiffies(500);
168 while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
169 if (time_after_eq(jiffies, timeout)) {
170 DSSERR("PLL lock timed out\n");
171 goto err1;
172 }
173 }
174
175 dispc_lcd_enable_signal(1);
176
177 /* Waiting for SDI reset to complete */
178 timeout = jiffies + msecs_to_jiffies(500);
179 while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
180 if (time_after_eq(jiffies, timeout)) {
181 DSSERR("SDI reset timed out\n");
182 goto err2;
183 }
184 }
185
186 return 0;
187
188 err2:
189 dispc_lcd_enable_signal(0);
190 err1:
191 /* Reset SDI PLL */
192 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
193
194 dispc_pck_free_enable(0);
195
196 return -ETIMEDOUT;
197}
198
199void dss_sdi_disable(void)
200{
201 dispc_lcd_enable_signal(0);
202
203 dispc_pck_free_enable(0);
204
205 /* Reset SDI PLL */
206 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
207}
208
209void dss_dump_clocks(struct seq_file *s)
210{
211 unsigned long dpll4_ck_rate;
212 unsigned long dpll4_m4_ck_rate;
213
214 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
215
216 dpll4_ck_rate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
217 dpll4_m4_ck_rate = clk_get_rate(dss.dpll4_m4_ck);
218
219 seq_printf(s, "- DSS -\n");
220
221 seq_printf(s, "dpll4_ck %lu\n", dpll4_ck_rate);
222
223 seq_printf(s, "dss1_alwon_fclk = %lu / %lu * 2 = %lu\n",
224 dpll4_ck_rate,
225 dpll4_ck_rate / dpll4_m4_ck_rate,
226 dss_clk_get_rate(DSS_CLK_FCK1));
227
228 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
229}
230
231void dss_dump_regs(struct seq_file *s)
232{
233#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
234
235 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
236
237 DUMPREG(DSS_REVISION);
238 DUMPREG(DSS_SYSCONFIG);
239 DUMPREG(DSS_SYSSTATUS);
240 DUMPREG(DSS_IRQSTATUS);
241 DUMPREG(DSS_CONTROL);
242 DUMPREG(DSS_SDI_CONTROL);
243 DUMPREG(DSS_PLL_CONTROL);
244 DUMPREG(DSS_SDI_STATUS);
245
246 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
247#undef DUMPREG
248}
249
250void dss_select_clk_source(bool dsi, bool dispc)
251{
252 u32 r;
253 r = dss_read_reg(DSS_CONTROL);
254 r = FLD_MOD(r, dsi, 1, 1); /* DSI_CLK_SWITCH */
255 r = FLD_MOD(r, dispc, 0, 0); /* DISPC_CLK_SWITCH */
256 dss_write_reg(DSS_CONTROL, r);
257}
258
259int dss_get_dsi_clk_source(void)
260{
261 return FLD_GET(dss_read_reg(DSS_CONTROL), 1, 1);
262}
263
264int dss_get_dispc_clk_source(void)
265{
266 return FLD_GET(dss_read_reg(DSS_CONTROL), 0, 0);
267}
268
269/* calculate clock rates using dividers in cinfo */
270int dss_calc_clock_rates(struct dss_clock_info *cinfo)
271{
272 unsigned long prate;
273
274 if (cinfo->fck_div > 16 || cinfo->fck_div == 0)
275 return -EINVAL;
276
277 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
278
279 cinfo->fck = prate / cinfo->fck_div;
280
281 return 0;
282}
283
284int dss_set_clock_div(struct dss_clock_info *cinfo)
285{
286 unsigned long prate;
287 int r;
288
289 if (cpu_is_omap34xx()) {
290 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
291 DSSDBG("dpll4_m4 = %ld\n", prate);
292
293 r = clk_set_rate(dss.dpll4_m4_ck, prate / cinfo->fck_div);
294 if (r)
295 return r;
296 }
297
298 DSSDBG("fck = %ld (%d)\n", cinfo->fck, cinfo->fck_div);
299
300 return 0;
301}
302
303int dss_get_clock_div(struct dss_clock_info *cinfo)
304{
305 cinfo->fck = dss_clk_get_rate(DSS_CLK_FCK1);
306
307 if (cpu_is_omap34xx()) {
308 unsigned long prate;
309 prate = clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
310 cinfo->fck_div = prate / (cinfo->fck / 2);
311 } else {
312 cinfo->fck_div = 0;
313 }
314
315 return 0;
316}
317
318unsigned long dss_get_dpll4_rate(void)
319{
320 if (cpu_is_omap34xx())
321 return clk_get_rate(clk_get_parent(dss.dpll4_m4_ck));
322 else
323 return 0;
324}
325
326int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
327 struct dss_clock_info *dss_cinfo,
328 struct dispc_clock_info *dispc_cinfo)
329{
330 unsigned long prate;
331 struct dss_clock_info best_dss;
332 struct dispc_clock_info best_dispc;
333
334 unsigned long fck;
335
336 u16 fck_div;
337
338 int match = 0;
339 int min_fck_per_pck;
340
341 prate = dss_get_dpll4_rate();
342
343 fck = dss_clk_get_rate(DSS_CLK_FCK1);
344 if (req_pck == dss.cache_req_pck &&
345 ((cpu_is_omap34xx() && prate == dss.cache_prate) ||
346 dss.cache_dss_cinfo.fck == fck)) {
347 DSSDBG("dispc clock info found from cache.\n");
348 *dss_cinfo = dss.cache_dss_cinfo;
349 *dispc_cinfo = dss.cache_dispc_cinfo;
350 return 0;
351 }
352
353 min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;
354
355 if (min_fck_per_pck &&
356 req_pck * min_fck_per_pck > DISPC_MAX_FCK) {
357 DSSERR("Requested pixel clock not possible with the current "
358 "OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
359 "the constraint off.\n");
360 min_fck_per_pck = 0;
361 }
362
363retry:
364 memset(&best_dss, 0, sizeof(best_dss));
365 memset(&best_dispc, 0, sizeof(best_dispc));
366
367 if (cpu_is_omap24xx()) {
368 struct dispc_clock_info cur_dispc;
369 /* XXX can we change the clock on omap2? */
370 fck = dss_clk_get_rate(DSS_CLK_FCK1);
371 fck_div = 1;
372
373 dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
374 match = 1;
375
376 best_dss.fck = fck;
377 best_dss.fck_div = fck_div;
378
379 best_dispc = cur_dispc;
380
381 goto found;
382 } else if (cpu_is_omap34xx()) {
383 for (fck_div = 16; fck_div > 0; --fck_div) {
384 struct dispc_clock_info cur_dispc;
385
386 fck = prate / fck_div * 2;
387
388 if (fck > DISPC_MAX_FCK)
389 continue;
390
391 if (min_fck_per_pck &&
392 fck < req_pck * min_fck_per_pck)
393 continue;
394
395 match = 1;
396
397 dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc);
398
399 if (abs(cur_dispc.pck - req_pck) <
400 abs(best_dispc.pck - req_pck)) {
401
402 best_dss.fck = fck;
403 best_dss.fck_div = fck_div;
404
405 best_dispc = cur_dispc;
406
407 if (cur_dispc.pck == req_pck)
408 goto found;
409 }
410 }
411 } else {
412 BUG();
413 }
414
415found:
416 if (!match) {
417 if (min_fck_per_pck) {
418 DSSERR("Could not find suitable clock settings.\n"
419					"Turning FCK/PCK constraint off and "
420 "trying again.\n");
421 min_fck_per_pck = 0;
422 goto retry;
423 }
424
425 DSSERR("Could not find suitable clock settings.\n");
426
427 return -EINVAL;
428 }
429
430 if (dss_cinfo)
431 *dss_cinfo = best_dss;
432 if (dispc_cinfo)
433 *dispc_cinfo = best_dispc;
434
435 dss.cache_req_pck = req_pck;
436 dss.cache_prate = prate;
437 dss.cache_dss_cinfo = best_dss;
438 dss.cache_dispc_cinfo = best_dispc;
439
440 return 0;
441}
442
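As a worked example of the OMAP3 branch above, assume a hypothetical dpll4 rate of 864 MHz and a req_pck near 25 MHz: the loop tries fck = 864 / 16 * 2 = 108 MHz up through 864 / 10 * 2 = 172.8 MHz (dividers 9 and below exceed DISPC_MAX_FCK, 173 MHz, and are skipped), asks dispc_find_clk_divs() for the closest achievable pixel clock at each candidate, keeps the best match, and returns early on an exact hit.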
443
444
445static irqreturn_t dss_irq_handler_omap2(int irq, void *arg)
446{
447 dispc_irq_handler();
448
449 return IRQ_HANDLED;
450}
451
452static irqreturn_t dss_irq_handler_omap3(int irq, void *arg)
453{
454 u32 irqstatus;
455
456 irqstatus = dss_read_reg(DSS_IRQSTATUS);
457
458 if (irqstatus & (1<<0)) /* DISPC_IRQ */
459 dispc_irq_handler();
460#ifdef CONFIG_OMAP2_DSS_DSI
461 if (irqstatus & (1<<1)) /* DSI_IRQ */
462 dsi_irq_handler();
463#endif
464
465 return IRQ_HANDLED;
466}
467
468static int _omap_dss_wait_reset(void)
469{
470 unsigned timeout = 1000;
471
472 while (REG_GET(DSS_SYSSTATUS, 0, 0) == 0) {
473 udelay(1);
474 if (!--timeout) {
475 DSSERR("soft reset failed\n");
476 return -ENODEV;
477 }
478 }
479
480 return 0;
481}
482
483static int _omap_dss_reset(void)
484{
485 /* Soft reset */
486 REG_FLD_MOD(DSS_SYSCONFIG, 1, 1, 1);
487 return _omap_dss_wait_reset();
488}
489
490void dss_set_venc_output(enum omap_dss_venc_type type)
491{
492 int l = 0;
493
494 if (type == OMAP_DSS_VENC_TYPE_COMPOSITE)
495 l = 0;
496 else if (type == OMAP_DSS_VENC_TYPE_SVIDEO)
497 l = 1;
498 else
499 BUG();
500
501 /* venc out selection. 0 = comp, 1 = svideo */
502 REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
503}
504
505void dss_set_dac_pwrdn_bgz(bool enable)
506{
507 REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
508}
509
510int dss_init(bool skip_init)
511{
512 int r;
513 u32 rev;
514
515 dss.base = ioremap(DSS_BASE, DSS_SZ_REGS);
516 if (!dss.base) {
517 DSSERR("can't ioremap DSS\n");
518 r = -ENOMEM;
519 goto fail0;
520 }
521
522 if (!skip_init) {
523 /* disable LCD and DIGIT output. This seems to fix the synclost
524		 * problem that we get if the bootloader starts the DSS and
525 * the kernel resets it */
526 omap_writel(omap_readl(0x48050440) & ~0x3, 0x48050440);
527
528 /* We need to wait here a bit, otherwise we sometimes start to
529		 * get synclost errors, and after that only a power cycle will
530 * restore DSS functionality. I have no idea why this happens.
531 * And we have to wait _before_ resetting the DSS, but after
532 * enabling clocks.
533 */
534 msleep(50);
535
536 _omap_dss_reset();
537 }
538
539 /* autoidle */
540 REG_FLD_MOD(DSS_SYSCONFIG, 1, 0, 0);
541
542 /* Select DPLL */
543 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
544
545#ifdef CONFIG_OMAP2_DSS_VENC
546 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
547 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
548 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
549#endif
550
551 r = request_irq(INT_24XX_DSS_IRQ,
552 cpu_is_omap24xx()
553 ? dss_irq_handler_omap2
554 : dss_irq_handler_omap3,
555 0, "OMAP DSS", NULL);
556
557 if (r < 0) {
558 DSSERR("omap2 dss: request_irq failed\n");
559 goto fail1;
560 }
561
562 if (cpu_is_omap34xx()) {
563 dss.dpll4_m4_ck = clk_get(NULL, "dpll4_m4_ck");
564 if (IS_ERR(dss.dpll4_m4_ck)) {
565 DSSERR("Failed to get dpll4_m4_ck\n");
566 r = PTR_ERR(dss.dpll4_m4_ck);
567 goto fail2;
568 }
569 }
570
571 dss_save_context();
572
573 rev = dss_read_reg(DSS_REVISION);
574 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
575 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
576
577 return 0;
578
579fail2:
580 free_irq(INT_24XX_DSS_IRQ, NULL);
581fail1:
582 iounmap(dss.base);
583fail0:
584 return r;
585}
586
587void dss_exit(void)
588{
589 if (cpu_is_omap34xx())
590 clk_put(dss.dpll4_m4_ck);
591
592 free_irq(INT_24XX_DSS_IRQ, NULL);
593
594 iounmap(dss.base);
595}
596
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
new file mode 100644
index 000000000000..8da5ac42151b
--- /dev/null
+++ b/drivers/video/omap2/dss/dss.h
@@ -0,0 +1,370 @@
1/*
2 * linux/drivers/video/omap2/dss/dss.h
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#ifndef __OMAP2_DSS_H
24#define __OMAP2_DSS_H
25
26#ifdef CONFIG_OMAP2_DSS_DEBUG_SUPPORT
27#define DEBUG
28#endif
29
30#ifdef DEBUG
31extern unsigned int dss_debug;
32#ifdef DSS_SUBSYS_NAME
33#define DSSDBG(format, ...) \
34 if (dss_debug) \
35 printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME ": " format, \
36 ## __VA_ARGS__)
37#else
38#define DSSDBG(format, ...) \
39 if (dss_debug) \
40 printk(KERN_DEBUG "omapdss: " format, ## __VA_ARGS__)
41#endif
42
43#ifdef DSS_SUBSYS_NAME
44#define DSSDBGF(format, ...) \
45 if (dss_debug) \
46 printk(KERN_DEBUG "omapdss " DSS_SUBSYS_NAME \
47 ": %s(" format ")\n", \
48 __func__, \
49 ## __VA_ARGS__)
50#else
51#define DSSDBGF(format, ...) \
52 if (dss_debug) \
53 printk(KERN_DEBUG "omapdss: " \
54 ": %s(" format ")\n", \
55 __func__, \
56 ## __VA_ARGS__)
57#endif
58
59#else /* DEBUG */
60#define DSSDBG(format, ...)
61#define DSSDBGF(format, ...)
62#endif
63
64
65#ifdef DSS_SUBSYS_NAME
66#define DSSERR(format, ...) \
67 printk(KERN_ERR "omapdss " DSS_SUBSYS_NAME " error: " format, \
68 ## __VA_ARGS__)
69#else
70#define DSSERR(format, ...) \
71 printk(KERN_ERR "omapdss error: " format, ## __VA_ARGS__)
72#endif
73
74#ifdef DSS_SUBSYS_NAME
75#define DSSINFO(format, ...) \
76 printk(KERN_INFO "omapdss " DSS_SUBSYS_NAME ": " format, \
77 ## __VA_ARGS__)
78#else
79#define DSSINFO(format, ...) \
80 printk(KERN_INFO "omapdss: " format, ## __VA_ARGS__)
81#endif
82
83#ifdef DSS_SUBSYS_NAME
84#define DSSWARN(format, ...) \
85 printk(KERN_WARNING "omapdss " DSS_SUBSYS_NAME ": " format, \
86 ## __VA_ARGS__)
87#else
88#define DSSWARN(format, ...) \
89 printk(KERN_WARNING "omapdss: " format, ## __VA_ARGS__)
90#endif
91
92/* OMAP TRM gives bitfields as start:end, where start is the higher bit
93 number. For example 7:0 */
94#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
95#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
96#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
97#define FLD_MOD(orig, val, start, end) \
98 (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
99
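A small worked example of the bitfield helpers under the TRM's start:end convention (the values are arbitrary):

u32 val = 0x1234;
u32 field = FLD_GET(val, 7, 4);	/* (0x1234 & 0xf0) >> 4 == 0x3 */
val = FLD_MOD(val, 0xf, 7, 4);	/* (0x1234 & ~0xf0) | 0xf0 == 0x12f4 */

The REG_GET() and REG_FLD_MOD() wrappers in the .c files apply the same helpers as a register read, or a register read-modify-write, respectively.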
100#define DISPC_MAX_FCK 173000000
101
102enum omap_burst_size {
103 OMAP_DSS_BURST_4x32 = 0,
104 OMAP_DSS_BURST_8x32 = 1,
105 OMAP_DSS_BURST_16x32 = 2,
106};
107
108enum omap_parallel_interface_mode {
109 OMAP_DSS_PARALLELMODE_BYPASS, /* MIPI DPI */
110 OMAP_DSS_PARALLELMODE_RFBI, /* MIPI DBI */
111 OMAP_DSS_PARALLELMODE_DSI,
112};
113
114enum dss_clock {
115 DSS_CLK_ICK = 1 << 0,
116 DSS_CLK_FCK1 = 1 << 1,
117 DSS_CLK_FCK2 = 1 << 2,
118 DSS_CLK_54M = 1 << 3,
119 DSS_CLK_96M = 1 << 4,
120};
121
122struct dss_clock_info {
123 /* rates that we get with dividers below */
124 unsigned long fck;
125
126 /* dividers */
127 u16 fck_div;
128};
129
130struct dispc_clock_info {
131 /* rates that we get with dividers below */
132 unsigned long lck;
133 unsigned long pck;
134
135 /* dividers */
136 u16 lck_div;
137 u16 pck_div;
138};
139
140struct dsi_clock_info {
141 /* rates that we get with dividers below */
142 unsigned long fint;
143 unsigned long clkin4ddr;
144 unsigned long clkin;
145 unsigned long dsi1_pll_fclk;
146 unsigned long dsi2_pll_fclk;
147
148 unsigned long lp_clk;
149
150 /* dividers */
151 u16 regn;
152 u16 regm;
153 u16 regm3;
154 u16 regm4;
155
156 u16 lp_clk_div;
157
158 u8 highfreq;
159 bool use_dss2_fck;
160};
161
162struct seq_file;
163struct platform_device;
164
165/* core */
166void dss_clk_enable(enum dss_clock clks);
167void dss_clk_disable(enum dss_clock clks);
168unsigned long dss_clk_get_rate(enum dss_clock clk);
169int dss_need_ctx_restore(void);
170void dss_dump_clocks(struct seq_file *s);
171struct bus_type *dss_get_bus(void);
172
173/* display */
174int dss_suspend_all_devices(void);
175int dss_resume_all_devices(void);
176void dss_disable_all_devices(void);
177
178void dss_init_device(struct platform_device *pdev,
179 struct omap_dss_device *dssdev);
180void dss_uninit_device(struct platform_device *pdev,
181 struct omap_dss_device *dssdev);
182bool dss_use_replication(struct omap_dss_device *dssdev,
183 enum omap_color_mode mode);
184void default_get_overlay_fifo_thresholds(enum omap_plane plane,
185 u32 fifo_size, enum omap_burst_size *burst_size,
186 u32 *fifo_low, u32 *fifo_high);
187
188/* manager */
189int dss_init_overlay_managers(struct platform_device *pdev);
190void dss_uninit_overlay_managers(struct platform_device *pdev);
191int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
192void dss_setup_partial_planes(struct omap_dss_device *dssdev,
193 u16 *x, u16 *y, u16 *w, u16 *h);
194void dss_start_update(struct omap_dss_device *dssdev);
195
196/* overlay */
197void dss_init_overlays(struct platform_device *pdev);
198void dss_uninit_overlays(struct platform_device *pdev);
199int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev);
200void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
201#ifdef L4_EXAMPLE
202void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
203#endif
204void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
205
206/* DSS */
207int dss_init(bool skip_init);
208void dss_exit(void);
209
210void dss_save_context(void);
211void dss_restore_context(void);
212
213void dss_dump_regs(struct seq_file *s);
214
215void dss_sdi_init(u8 datapairs);
216int dss_sdi_enable(void);
217void dss_sdi_disable(void);
218
219void dss_select_clk_source(bool dsi, bool dispc);
220int dss_get_dsi_clk_source(void);
221int dss_get_dispc_clk_source(void);
222void dss_set_venc_output(enum omap_dss_venc_type type);
223void dss_set_dac_pwrdn_bgz(bool enable);
224
225unsigned long dss_get_dpll4_rate(void);
226int dss_calc_clock_rates(struct dss_clock_info *cinfo);
227int dss_set_clock_div(struct dss_clock_info *cinfo);
228int dss_get_clock_div(struct dss_clock_info *cinfo);
229int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
230 struct dss_clock_info *dss_cinfo,
231 struct dispc_clock_info *dispc_cinfo);
232
233/* SDI */
234int sdi_init(bool skip_init);
235void sdi_exit(void);
236int sdi_init_display(struct omap_dss_device *display);
237
238/* DSI */
239int dsi_init(struct platform_device *pdev);
240void dsi_exit(void);
241
242void dsi_dump_clocks(struct seq_file *s);
243void dsi_dump_regs(struct seq_file *s);
244
245void dsi_save_context(void);
246void dsi_restore_context(void);
247
248int dsi_init_display(struct omap_dss_device *display);
249void dsi_irq_handler(void);
250unsigned long dsi_get_dsi1_pll_rate(void);
251int dsi_pll_set_clock_div(struct dsi_clock_info *cinfo);
252int dsi_pll_calc_clock_div_pck(bool is_tft, unsigned long req_pck,
253 struct dsi_clock_info *cinfo,
254 struct dispc_clock_info *dispc_cinfo);
255int dsi_pll_init(struct omap_dss_device *dssdev, bool enable_hsclk,
256 bool enable_hsdiv);
257void dsi_pll_uninit(void);
258void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
259 u32 fifo_size, enum omap_burst_size *burst_size,
260 u32 *fifo_low, u32 *fifo_high);
261
262/* DPI */
263int dpi_init(void);
264void dpi_exit(void);
265int dpi_init_display(struct omap_dss_device *dssdev);
266
267/* DISPC */
268int dispc_init(void);
269void dispc_exit(void);
270void dispc_dump_clocks(struct seq_file *s);
271void dispc_dump_regs(struct seq_file *s);
272void dispc_irq_handler(void);
273void dispc_fake_vsync_irq(void);
274
275void dispc_save_context(void);
276void dispc_restore_context(void);
277
278void dispc_enable_sidle(void);
279void dispc_disable_sidle(void);
280
281void dispc_lcd_enable_signal_polarity(bool act_high);
282void dispc_lcd_enable_signal(bool enable);
283void dispc_pck_free_enable(bool enable);
284void dispc_enable_fifohandcheck(bool enable);
285
286void dispc_set_lcd_size(u16 width, u16 height);
287void dispc_set_digit_size(u16 width, u16 height);
288u32 dispc_get_plane_fifo_size(enum omap_plane plane);
289void dispc_setup_plane_fifo(enum omap_plane plane, u32 low, u32 high);
290void dispc_enable_fifomerge(bool enable);
291void dispc_set_burst_size(enum omap_plane plane,
292 enum omap_burst_size burst_size);
293
294void dispc_set_plane_ba0(enum omap_plane plane, u32 paddr);
295void dispc_set_plane_ba1(enum omap_plane plane, u32 paddr);
296void dispc_set_plane_pos(enum omap_plane plane, u16 x, u16 y);
297void dispc_set_plane_size(enum omap_plane plane, u16 width, u16 height);
298void dispc_set_channel_out(enum omap_plane plane,
299 enum omap_channel channel_out);
300
301int dispc_setup_plane(enum omap_plane plane,
302 u32 paddr, u16 screen_width,
303 u16 pos_x, u16 pos_y,
304 u16 width, u16 height,
305 u16 out_width, u16 out_height,
306 enum omap_color_mode color_mode,
307 bool ilace,
308 enum omap_dss_rotation_type rotation_type,
309 u8 rotation, bool mirror,
310 u8 global_alpha);
311
312bool dispc_go_busy(enum omap_channel channel);
313void dispc_go(enum omap_channel channel);
314void dispc_enable_lcd_out(bool enable);
315void dispc_enable_digit_out(bool enable);
316int dispc_enable_plane(enum omap_plane plane, bool enable);
317void dispc_enable_replication(enum omap_plane plane, bool enable);
318
319void dispc_set_parallel_interface_mode(enum omap_parallel_interface_mode mode);
320void dispc_set_tft_data_lines(u8 data_lines);
321void dispc_set_lcd_display_type(enum omap_lcd_display_type type);
322void dispc_set_loadmode(enum omap_dss_load_mode mode);
323
324void dispc_set_default_color(enum omap_channel channel, u32 color);
325u32 dispc_get_default_color(enum omap_channel channel);
326void dispc_set_trans_key(enum omap_channel ch,
327 enum omap_dss_trans_key_type type,
328 u32 trans_key);
329void dispc_get_trans_key(enum omap_channel ch,
330 enum omap_dss_trans_key_type *type,
331 u32 *trans_key);
332void dispc_enable_trans_key(enum omap_channel ch, bool enable);
333void dispc_enable_alpha_blending(enum omap_channel ch, bool enable);
334bool dispc_trans_key_enabled(enum omap_channel ch);
335bool dispc_alpha_blending_enabled(enum omap_channel ch);
336
337bool dispc_lcd_timings_ok(struct omap_video_timings *timings);
338void dispc_set_lcd_timings(struct omap_video_timings *timings);
339unsigned long dispc_fclk_rate(void);
340unsigned long dispc_lclk_rate(void);
341unsigned long dispc_pclk_rate(void);
342void dispc_set_pol_freq(enum omap_panel_config config, u8 acbi, u8 acb);
343void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
344 struct dispc_clock_info *cinfo);
345int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
346 struct dispc_clock_info *cinfo);
347int dispc_set_clock_div(struct dispc_clock_info *cinfo);
348int dispc_get_clock_div(struct dispc_clock_info *cinfo);
349
350
351/* VENC */
352int venc_init(struct platform_device *pdev);
353void venc_exit(void);
354void venc_dump_regs(struct seq_file *s);
355int venc_init_display(struct omap_dss_device *display);
356
357/* RFBI */
358int rfbi_init(void);
359void rfbi_exit(void);
360void rfbi_dump_regs(struct seq_file *s);
361
362int rfbi_configure(int rfbi_module, int bpp, int lines);
363void rfbi_enable_rfbi(bool enable);
364void rfbi_transfer_area(u16 width, u16 height,
365 void (callback)(void *data), void *data);
366void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t);
367unsigned long rfbi_get_max_tx_rate(void);
368int rfbi_init_display(struct omap_dss_device *display);
369
370#endif
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
new file mode 100644
index 000000000000..27d9c465c851
--- /dev/null
+++ b/drivers/video/omap2/dss/manager.c
@@ -0,0 +1,1487 @@
1/*
2 * linux/drivers/video/omap2/dss/manager.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "MANAGER"
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/platform_device.h>
28#include <linux/spinlock.h>
29#include <linux/jiffies.h>
30
31#include <plat/display.h>
32#include <plat/cpu.h>
33
34#include "dss.h"
35
36static int num_managers;
37static struct list_head manager_list;
38
39static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
40{
41 return snprintf(buf, PAGE_SIZE, "%s\n", mgr->name);
42}
43
44static ssize_t manager_display_show(struct omap_overlay_manager *mgr, char *buf)
45{
46 return snprintf(buf, PAGE_SIZE, "%s\n",
47 mgr->device ? mgr->device->name : "<none>");
48}
49
50static ssize_t manager_display_store(struct omap_overlay_manager *mgr,
51 const char *buf, size_t size)
52{
53 int r = 0;
54 size_t len = size;
55 struct omap_dss_device *dssdev = NULL;
56
57 int match(struct omap_dss_device *dssdev, void *data)
58 {
59 const char *str = data;
60 return sysfs_streq(dssdev->name, str);
61 }
62
63 if (buf[size-1] == '\n')
64 --len;
65
66 if (len > 0)
67 dssdev = omap_dss_find_device((void *)buf, match);
68
69 if (len > 0 && dssdev == NULL)
70 return -EINVAL;
71
72 if (dssdev)
73 DSSDBG("display %s found\n", dssdev->name);
74
75 if (mgr->device) {
76 r = mgr->unset_device(mgr);
77 if (r) {
78 DSSERR("failed to unset display\n");
79 goto put_device;
80 }
81 }
82
83 if (dssdev) {
84 r = mgr->set_device(mgr, dssdev);
85 if (r) {
86 DSSERR("failed to set display\n");
87 goto put_device;
88 }
89
90 r = mgr->apply(mgr);
91 if (r) {
92 DSSERR("failed to apply dispc config\n");
93 goto put_device;
94 }
95 }
96
97put_device:
98 if (dssdev)
99 omap_dss_put_device(dssdev);
100
101 return r ? r : size;
102}
103
104static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
105 char *buf)
106{
107 return snprintf(buf, PAGE_SIZE, "%u\n", mgr->info.default_color);
108}
109
110static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
111 const char *buf, size_t size)
112{
113 struct omap_overlay_manager_info info;
114 u32 color;
115 int r;
116
117 if (sscanf(buf, "%u", &color) != 1)
118 return -EINVAL;
119
120 mgr->get_manager_info(mgr, &info);
121
122 info.default_color = color;
123
124 r = mgr->set_manager_info(mgr, &info);
125 if (r)
126 return r;
127
128 r = mgr->apply(mgr);
129 if (r)
130 return r;
131
132 return size;
133}
134
135static const char *trans_key_type_str[] = {
136 "gfx-destination",
137 "video-source",
138};
139
140static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
141 char *buf)
142{
143 enum omap_dss_trans_key_type key_type;
144
145 key_type = mgr->info.trans_key_type;
146 BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
147
148 return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
149}
150
151static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
152 const char *buf, size_t size)
153{
154 enum omap_dss_trans_key_type key_type;
155 struct omap_overlay_manager_info info;
156 int r;
157
158 for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
159 key_type < ARRAY_SIZE(trans_key_type_str); key_type++) {
160 if (sysfs_streq(buf, trans_key_type_str[key_type]))
161 break;
162 }
163
164 if (key_type == ARRAY_SIZE(trans_key_type_str))
165 return -EINVAL;
166
167 mgr->get_manager_info(mgr, &info);
168
169 info.trans_key_type = key_type;
170
171 r = mgr->set_manager_info(mgr, &info);
172 if (r)
173 return r;
174
175 r = mgr->apply(mgr);
176 if (r)
177 return r;
178
179 return size;
180}
181
182static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
183 char *buf)
184{
185 return snprintf(buf, PAGE_SIZE, "%u\n", mgr->info.trans_key);
186}
187
188static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
189 const char *buf, size_t size)
190{
191 struct omap_overlay_manager_info info;
192 u32 key_value;
193 int r;
194
195 if (sscanf(buf, "%u", &key_value) != 1)
196 return -EINVAL;
197
198 mgr->get_manager_info(mgr, &info);
199
200 info.trans_key = key_value;
201
202 r = mgr->set_manager_info(mgr, &info);
203 if (r)
204 return r;
205
206 r = mgr->apply(mgr);
207 if (r)
208 return r;
209
210 return size;
211}
212
213static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
214 char *buf)
215{
216 return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_enabled);
217}
218
219static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
220 const char *buf, size_t size)
221{
222 struct omap_overlay_manager_info info;
223 int enable;
224 int r;
225
226 if (sscanf(buf, "%d", &enable) != 1)
227 return -EINVAL;
228
229 mgr->get_manager_info(mgr, &info);
230
231 info.trans_enabled = enable ? true : false;
232
233 r = mgr->set_manager_info(mgr, &info);
234 if (r)
235 return r;
236
237 r = mgr->apply(mgr);
238 if (r)
239 return r;
240
241 return size;
242}
243
244static ssize_t manager_alpha_blending_enabled_show(
245 struct omap_overlay_manager *mgr, char *buf)
246{
247 return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.alpha_enabled);
248}
249
250static ssize_t manager_alpha_blending_enabled_store(
251 struct omap_overlay_manager *mgr,
252 const char *buf, size_t size)
253{
254 struct omap_overlay_manager_info info;
255 int enable;
256 int r;
257
258 if (sscanf(buf, "%d", &enable) != 1)
259 return -EINVAL;
260
261 mgr->get_manager_info(mgr, &info);
262
263 info.alpha_enabled = enable ? true : false;
264
265 r = mgr->set_manager_info(mgr, &info);
266 if (r)
267 return r;
268
269 r = mgr->apply(mgr);
270 if (r)
271 return r;
272
273 return size;
274}
275
276struct manager_attribute {
277 struct attribute attr;
278 ssize_t (*show)(struct omap_overlay_manager *, char *);
279 ssize_t (*store)(struct omap_overlay_manager *, const char *, size_t);
280};
281
282#define MANAGER_ATTR(_name, _mode, _show, _store) \
283 struct manager_attribute manager_attr_##_name = \
284 __ATTR(_name, _mode, _show, _store)
285
286static MANAGER_ATTR(name, S_IRUGO, manager_name_show, NULL);
287static MANAGER_ATTR(display, S_IRUGO|S_IWUSR,
288 manager_display_show, manager_display_store);
289static MANAGER_ATTR(default_color, S_IRUGO|S_IWUSR,
290 manager_default_color_show, manager_default_color_store);
291static MANAGER_ATTR(trans_key_type, S_IRUGO|S_IWUSR,
292 manager_trans_key_type_show, manager_trans_key_type_store);
293static MANAGER_ATTR(trans_key_value, S_IRUGO|S_IWUSR,
294 manager_trans_key_value_show, manager_trans_key_value_store);
295static MANAGER_ATTR(trans_key_enabled, S_IRUGO|S_IWUSR,
296 manager_trans_key_enabled_show,
297 manager_trans_key_enabled_store);
298static MANAGER_ATTR(alpha_blending_enabled, S_IRUGO|S_IWUSR,
299 manager_alpha_blending_enabled_show,
300 manager_alpha_blending_enabled_store);
301
302
303static struct attribute *manager_sysfs_attrs[] = {
304 &manager_attr_name.attr,
305 &manager_attr_display.attr,
306 &manager_attr_default_color.attr,
307 &manager_attr_trans_key_type.attr,
308 &manager_attr_trans_key_value.attr,
309 &manager_attr_trans_key_enabled.attr,
310 &manager_attr_alpha_blending_enabled.attr,
311 NULL
312};
313
314static ssize_t manager_attr_show(struct kobject *kobj, struct attribute *attr,
315 char *buf)
316{
317 struct omap_overlay_manager *manager;
318 struct manager_attribute *manager_attr;
319
320 manager = container_of(kobj, struct omap_overlay_manager, kobj);
321 manager_attr = container_of(attr, struct manager_attribute, attr);
322
323 if (!manager_attr->show)
324 return -ENOENT;
325
326 return manager_attr->show(manager, buf);
327}
328
329static ssize_t manager_attr_store(struct kobject *kobj, struct attribute *attr,
330 const char *buf, size_t size)
331{
332 struct omap_overlay_manager *manager;
333 struct manager_attribute *manager_attr;
334
335 manager = container_of(kobj, struct omap_overlay_manager, kobj);
336 manager_attr = container_of(attr, struct manager_attribute, attr);
337
338 if (!manager_attr->store)
339 return -ENOENT;
340
341 return manager_attr->store(manager, buf, size);
342}
343
344static struct sysfs_ops manager_sysfs_ops = {
345 .show = manager_attr_show,
346 .store = manager_attr_store,
347};
348
349static struct kobj_type manager_ktype = {
350 .sysfs_ops = &manager_sysfs_ops,
351 .default_attrs = manager_sysfs_attrs,
352};
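/*
 * Example of driving a manager from userspace (hypothetical sysfs paths;
 * the exact location depends on how the omapdss platform device is named):
 *
 *	# cat /sys/devices/platform/omapdss/manager0/name
 *	lcd
 *	# echo "lcd" > /sys/devices/platform/omapdss/manager0/display
 *	# echo 1 > /sys/devices/platform/omapdss/manager0/trans_key_enabled
 */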
353
354/*
355 * We have 4 levels of cache for the dispc settings. First two are in SW and
356 * the latter two in HW.
357 *
358 * +--------------------+
359 * |overlay/manager_info|
360 * +--------------------+
361 * v
362 * apply()
363 * v
364 * +--------------------+
365 * | dss_cache |
366 * +--------------------+
367 * v
368 * configure()
369 * v
370 * +--------------------+
371 * | shadow registers |
372 * +--------------------+
373 * v
374 * VFP or lcd/digit_enable
375 * v
376 * +--------------------+
377 * | registers |
378 * +--------------------+
379 */
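/*
 * As a sketch, a typical path through the levels above:
 *
 *	ovl->set_overlay_info(ovl, &info);	<- updates overlay info (SW)
 *	mgr->apply(mgr);			<- copies the info to dss_cache
 *	configure_dispc();			<- writes non-busy entries to
 *						   the shadow registers, sets GO
 *	(VSYNC/EVSYNC)				<- HW takes the shadow
 *						   registers into use
 */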
380
381struct overlay_cache_data {
382 /* If true, cache changed, but not written to shadow registers. Set
383 * in apply(), cleared when registers written. */
384 bool dirty;
385 /* If true, shadow registers contain changed values not yet in real
386 * registers. Set when writing to shadow registers, cleared at
387 * VSYNC/EVSYNC */
388 bool shadow_dirty;
389
390 bool enabled;
391
392 u32 paddr;
393 void __iomem *vaddr;
394 u16 screen_width;
395 u16 width;
396 u16 height;
397 enum omap_color_mode color_mode;
398 u8 rotation;
399 enum omap_dss_rotation_type rotation_type;
400 bool mirror;
401
402 u16 pos_x;
403 u16 pos_y;
404 u16 out_width; /* if 0, out_width == width */
405 u16 out_height; /* if 0, out_height == height */
406 u8 global_alpha;
407
408 enum omap_channel channel;
409 bool replication;
410 bool ilace;
411
412 enum omap_burst_size burst_size;
413 u32 fifo_low;
414 u32 fifo_high;
415
416 bool manual_update;
417};
418
419struct manager_cache_data {
420 /* If true, cache changed, but not written to shadow registers. Set
421 * in apply(), cleared when registers written. */
422 bool dirty;
423 /* If true, shadow registers contain changed values not yet in real
424 * registers. Set when writing to shadow registers, cleared at
425 * VSYNC/EVSYNC */
426 bool shadow_dirty;
427
428 u32 default_color;
429
430 enum omap_dss_trans_key_type trans_key_type;
431 u32 trans_key;
432 bool trans_enabled;
433
434 bool alpha_enabled;
435
436 bool manual_upd_display;
437 bool manual_update;
438 bool do_manual_update;
439
440 /* manual update region */
441 u16 x, y, w, h;
442};
443
444static struct {
445 spinlock_t lock;
446 struct overlay_cache_data overlay_cache[3];
447 struct manager_cache_data manager_cache[2];
448
449 bool irq_enabled;
450} dss_cache;
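/* All accesses to dss_cache are serialized with dss_cache.lock; the IRQ
 * handler takes it with spin_lock(), the other paths with
 * spin_lock_irqsave(). */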
451
452
453
454static int omap_dss_set_device(struct omap_overlay_manager *mgr,
455 struct omap_dss_device *dssdev)
456{
457 int i;
458 int r;
459
460 if (dssdev->manager) {
461 DSSERR("display '%s' already has a manager '%s'\n",
462 dssdev->name, dssdev->manager->name);
463 return -EINVAL;
464 }
465
466 if ((mgr->supported_displays & dssdev->type) == 0) {
467 DSSERR("manager '%s' does not support display '%s'\n",
468 mgr->name, dssdev->name);
469 return -EINVAL;
470 }
471
472 for (i = 0; i < mgr->num_overlays; i++) {
473 struct omap_overlay *ovl = mgr->overlays[i];
474
475 if (ovl->manager != mgr || !ovl->info.enabled)
476 continue;
477
478 r = dss_check_overlay(ovl, dssdev);
479 if (r)
480 return r;
481 }
482
483 dssdev->manager = mgr;
484 mgr->device = dssdev;
485 mgr->device_changed = true;
486
487 return 0;
488}
489
490static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
491{
492 if (!mgr->device) {
493 DSSERR("failed to unset display, display not set.\n");
494 return -EINVAL;
495 }
496
497 mgr->device->manager = NULL;
498 mgr->device = NULL;
499 mgr->device_changed = true;
500
501 return 0;
502}
503
504static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
505{
506 unsigned long timeout = msecs_to_jiffies(500);
507 struct manager_cache_data *mc;
508 enum omap_channel channel;
509 u32 irq;
510 int r;
511 int i;
512
513 if (!mgr->device)
514 return 0;
515
516 if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
517 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
518 channel = OMAP_DSS_CHANNEL_DIGIT;
519 } else {
520 if (mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
521 enum omap_dss_update_mode mode;
522 mode = mgr->device->get_update_mode(mgr->device);
523 if (mode != OMAP_DSS_UPDATE_AUTO)
524 return 0;
525
526 irq = DISPC_IRQ_FRAMEDONE;
527 } else {
528 irq = DISPC_IRQ_VSYNC;
529 }
530 channel = OMAP_DSS_CHANNEL_LCD;
531 }
532
533 mc = &dss_cache.manager_cache[mgr->id];
534 i = 0;
535 while (1) {
536 unsigned long flags;
537 bool shadow_dirty, dirty;
538
539 spin_lock_irqsave(&dss_cache.lock, flags);
540 dirty = mc->dirty;
541 shadow_dirty = mc->shadow_dirty;
542 spin_unlock_irqrestore(&dss_cache.lock, flags);
543
544 if (!dirty && !shadow_dirty) {
545 r = 0;
546 break;
547 }
548
549 /* 4 iterations is the worst case:
550 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
551 * 2 - first VSYNC, dirty = true
552 * 3 - dirty = false, shadow_dirty = true
553 * 4 - shadow_dirty = false */
554 if (i++ == 3) {
555 DSSERR("mgr(%d)->wait_for_go() not finishing\n",
556 mgr->id);
557 r = 0;
558 break;
559 }
560
561 r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
562 if (r == -ERESTARTSYS)
563 break;
564
565 if (r) {
566 DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
567 break;
568 }
569 }
570
571 return r;
572}
573
574int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
575{
576 unsigned long timeout = msecs_to_jiffies(500);
577 enum omap_channel channel;
578 struct overlay_cache_data *oc;
579 struct omap_dss_device *dssdev;
580 u32 irq;
581 int r;
582 int i;
583
584 if (!ovl->manager || !ovl->manager->device)
585 return 0;
586
587 dssdev = ovl->manager->device;
588
589 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
590 irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
591 channel = OMAP_DSS_CHANNEL_DIGIT;
592 } else {
593 if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
594 enum omap_dss_update_mode mode;
595 mode = dssdev->get_update_mode(dssdev);
596 if (mode != OMAP_DSS_UPDATE_AUTO)
597 return 0;
598
599 irq = DISPC_IRQ_FRAMEDONE;
600 } else {
601 irq = DISPC_IRQ_VSYNC;
602 }
603 channel = OMAP_DSS_CHANNEL_LCD;
604 }
605
606 oc = &dss_cache.overlay_cache[ovl->id];
607 i = 0;
608 while (1) {
609 unsigned long flags;
610 bool shadow_dirty, dirty;
611
612 spin_lock_irqsave(&dss_cache.lock, flags);
613 dirty = oc->dirty;
614 shadow_dirty = oc->shadow_dirty;
615 spin_unlock_irqrestore(&dss_cache.lock, flags);
616
617 if (!dirty && !shadow_dirty) {
618 r = 0;
619 break;
620 }
621
622 /* 4 iterations is the worst case:
623 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
624 * 2 - first VSYNC, dirty = true
625 * 3 - dirty = false, shadow_dirty = true
626 * 4 - shadow_dirty = false */
627 if (i++ == 3) {
628 DSSERR("ovl(%d)->wait_for_go() not finishing\n",
629 ovl->id);
630 r = 0;
631 break;
632 }
633
634 r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
635 if (r == -ERESTARTSYS)
636 break;
637
638 if (r) {
639 DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
640 break;
641 }
642 }
643
644 return r;
645}
646
647static int overlay_enabled(struct omap_overlay *ovl)
648{
649 return ovl->info.enabled && ovl->manager && ovl->manager->device;
650}
651
652/* Is rect1 a subset of rect2? */
653static bool rectangle_subset(int x1, int y1, int w1, int h1,
654 int x2, int y2, int w2, int h2)
655{
656 if (x1 < x2 || y1 < y2)
657 return false;
658
659 if (x1 + w1 > x2 + w2)
660 return false;
661
662 if (y1 + h1 > y2 + h2)
663 return false;
664
665 return true;
666}
667
668/* Do rect1 and rect2 overlap? */
669static bool rectangle_intersects(int x1, int y1, int w1, int h1,
670 int x2, int y2, int w2, int h2)
671{
672 if (x1 >= x2 + w2)
673 return false;
674
675 if (x2 >= x1 + w1)
676 return false;
677
678 if (y1 >= y2 + h2)
679 return false;
680
681 if (y2 >= y1 + h1)
682 return false;
683
684 return true;
685}
686
687static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
688{
689 if (oc->out_width != 0 && oc->width != oc->out_width)
690 return true;
691
692 if (oc->out_height != 0 && oc->height != oc->out_height)
693 return true;
694
695 return false;
696}
697
698static int configure_overlay(enum omap_plane plane)
699{
700 struct overlay_cache_data *c;
701 struct manager_cache_data *mc;
702 u16 outw, outh;
703 u16 x, y, w, h;
704 u32 paddr;
705 int r;
706
707 DSSDBGF("%d", plane);
708
709 c = &dss_cache.overlay_cache[plane];
710
711 if (!c->enabled) {
712 dispc_enable_plane(plane, 0);
713 return 0;
714 }
715
716 mc = &dss_cache.manager_cache[c->channel];
717
718 x = c->pos_x;
719 y = c->pos_y;
720 w = c->width;
721 h = c->height;
722 outw = c->out_width == 0 ? c->width : c->out_width;
723 outh = c->out_height == 0 ? c->height : c->out_height;
724 paddr = c->paddr;
725
726 if (c->manual_update && mc->do_manual_update) {
727 unsigned bpp;
728 /* If the overlay is outside the update region, disable it */
729 if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
730 x, y, outw, outh)) {
731 dispc_enable_plane(plane, 0);
732 return 0;
733 }
734
735 switch (c->color_mode) {
736 case OMAP_DSS_COLOR_RGB16:
737 case OMAP_DSS_COLOR_ARGB16:
738 case OMAP_DSS_COLOR_YUV2:
739 case OMAP_DSS_COLOR_UYVY:
740 bpp = 16;
741 break;
742
743 case OMAP_DSS_COLOR_RGB24P:
744 bpp = 24;
745 break;
746
747 case OMAP_DSS_COLOR_RGB24U:
748 case OMAP_DSS_COLOR_ARGB32:
749 case OMAP_DSS_COLOR_RGBA32:
750 case OMAP_DSS_COLOR_RGBX32:
751 bpp = 32;
752 break;
753
754 default:
755 BUG();
756 }
757
758 if (dispc_is_overlay_scaled(c)) {
759 /* If the overlay is scaled, the update area has
760 * already been enlarged to cover the whole overlay. We
761 * only need to adjust x/y here */
762 x = c->pos_x - mc->x;
763 y = c->pos_y - mc->y;
764 } else {
765 if (mc->x > c->pos_x) {
766 x = 0;
767 w -= (mc->x - c->pos_x);
768 paddr += (mc->x - c->pos_x) * bpp / 8;
769 } else {
770 x = c->pos_x - mc->x;
771 }
772
773 if (mc->y > c->pos_y) {
774 y = 0;
775 h -= (mc->y - c->pos_y);
776 paddr += (mc->y - c->pos_y) * c->screen_width *
777 bpp / 8;
778 } else {
779 y = c->pos_y - mc->y;
780 }
781
782 if (mc->w < (x+w))
783 w -= (x+w) - (mc->w);
784
785 if (mc->h < (y+h))
786 h -= (y+h) - (mc->h);
787
788 outw = w;
789 outh = h;
790 }
791 }
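	/* Worked example of the unscaled clipping above: with an update
	 * region starting at mc->x = 100 and an overlay at c->pos_x = 60 in
	 * a 32 bpp mode, the overlay loses 40 pixels on the left: x becomes
	 * 0, w shrinks by 40 and paddr advances by 40 * 32 / 8 = 160 bytes.
	 */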
792
793 r = dispc_setup_plane(plane,
794 paddr,
795 c->screen_width,
796 x, y,
797 w, h,
798 outw, outh,
799 c->color_mode,
800 c->ilace,
801 c->rotation_type,
802 c->rotation,
803 c->mirror,
804 c->global_alpha);
805
806 if (r) {
807 /* this shouldn't happen */
808 DSSERR("dispc_setup_plane failed for ovl %d\n", plane);
809 dispc_enable_plane(plane, 0);
810 return r;
811 }
812
813 dispc_enable_replication(plane, c->replication);
814
815 dispc_set_burst_size(plane, c->burst_size);
816 dispc_setup_plane_fifo(plane, c->fifo_low, c->fifo_high);
817
818 dispc_enable_plane(plane, 1);
819
820 return 0;
821}
822
823static void configure_manager(enum omap_channel channel)
824{
825 struct manager_cache_data *c;
826
827 DSSDBGF("%d", channel);
828
829 c = &dss_cache.manager_cache[channel];
830
831 dispc_set_trans_key(channel, c->trans_key_type, c->trans_key);
832 dispc_enable_trans_key(channel, c->trans_enabled);
833 dispc_enable_alpha_blending(channel, c->alpha_enabled);
834}
835
836/* configure_dispc() tries to write values from the cache to the shadow
837 * registers. It writes only to those managers/overlays that are not busy.
838 * Returns 0 if everything could be written to the shadow registers, and
839 * 1 if something could not. */
840static int configure_dispc(void)
841{
842 struct overlay_cache_data *oc;
843 struct manager_cache_data *mc;
844 const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
845 const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
846 int i;
847 int r;
848 bool mgr_busy[2];
849 bool mgr_go[2];
850 bool busy;
851
852 r = 0;
853 busy = false;
854
855 mgr_busy[0] = dispc_go_busy(0);
856 mgr_busy[1] = dispc_go_busy(1);
857 mgr_go[0] = false;
858 mgr_go[1] = false;
859
860 /* Commit overlay settings */
861 for (i = 0; i < num_ovls; ++i) {
862 oc = &dss_cache.overlay_cache[i];
863 mc = &dss_cache.manager_cache[oc->channel];
864
865 if (!oc->dirty)
866 continue;
867
868 if (oc->manual_update && !mc->do_manual_update)
869 continue;
870
871 if (mgr_busy[oc->channel]) {
872 busy = true;
873 continue;
874 }
875
876 r = configure_overlay(i);
877 if (r)
878 DSSERR("configure_overlay %d failed\n", i);
879
880 oc->dirty = false;
881 oc->shadow_dirty = true;
882 mgr_go[oc->channel] = true;
883 }
884
885 /* Commit manager settings */
886 for (i = 0; i < num_mgrs; ++i) {
887 mc = &dss_cache.manager_cache[i];
888
889 if (!mc->dirty)
890 continue;
891
892 if (mc->manual_update && !mc->do_manual_update)
893 continue;
894
895 if (mgr_busy[i]) {
896 busy = true;
897 continue;
898 }
899
900 configure_manager(i);
901 mc->dirty = false;
902 mc->shadow_dirty = true;
903 mgr_go[i] = true;
904 }
905
906 /* set GO */
907 for (i = 0; i < num_mgrs; ++i) {
908 mc = &dss_cache.manager_cache[i];
909
910 if (!mgr_go[i])
911 continue;
912
913 /* We don't need GO with a manual update display. The LCD iface
914 * will always be turned off after the frame, and the new settings
915 * will be taken into use at the next update */
916 if (!mc->manual_upd_display)
917 dispc_go(i);
918 }
919
920 if (busy)
921 r = 1;
922 else
923 r = 0;
924
925 return r;
926}
927
928/* Configure dispc for partial update. Return possibly modified update
929 * area */
930void dss_setup_partial_planes(struct omap_dss_device *dssdev,
931 u16 *xi, u16 *yi, u16 *wi, u16 *hi)
932{
933 struct overlay_cache_data *oc;
934 struct manager_cache_data *mc;
935 const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
936 struct omap_overlay_manager *mgr;
937 int i;
938 u16 x, y, w, h;
939 unsigned long flags;
940
941 x = *xi;
942 y = *yi;
943 w = *wi;
944 h = *hi;
945
946 DSSDBG("dss_setup_partial_planes %d,%d %dx%d\n",
947 *xi, *yi, *wi, *hi);
948
949 mgr = dssdev->manager;
950
951 if (!mgr) {
952 DSSDBG("no manager\n");
953 return;
954 }
955
956 spin_lock_irqsave(&dss_cache.lock, flags);
957
958 /* We need to show the whole overlay if it is scaled. So look for
959 * scaled overlays, and enlarge the update area if one is found.
960 * Also mark the overlay cache dirty */
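	/* Worked example: a 40x40 update at (150,150) that partially covers
	 * a scaled overlay at (100,100) with output size 200x200 is grown
	 * below to the union of the two rectangles, i.e. (100,100) 200x200.
	 */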
961 for (i = 0; i < num_ovls; ++i) {
962 unsigned x1, y1, x2, y2;
963 unsigned outw, outh;
964
965 oc = &dss_cache.overlay_cache[i];
966
967 if (oc->channel != mgr->id)
968 continue;
969
970 oc->dirty = true;
971
972 if (!oc->enabled)
973 continue;
974
975 if (!dispc_is_overlay_scaled(oc))
976 continue;
977
978 outw = oc->out_width == 0 ? oc->width : oc->out_width;
979 outh = oc->out_height == 0 ? oc->height : oc->out_height;
980
981 /* is the overlay outside the update region? */
982 if (!rectangle_intersects(x, y, w, h,
983 oc->pos_x, oc->pos_y,
984 outw, outh))
985 continue;
986
987 /* is the overlay totally inside the update region? */
988 if (rectangle_subset(oc->pos_x, oc->pos_y, outw, outh,
989 x, y, w, h))
990 continue;
991
992 if (x > oc->pos_x)
993 x1 = oc->pos_x;
994 else
995 x1 = x;
996
997 if (y > oc->pos_y)
998 y1 = oc->pos_y;
999 else
1000 y1 = y;
1001
1002 if ((x + w) < (oc->pos_x + outw))
1003 x2 = oc->pos_x + outw;
1004 else
1005 x2 = x + w;
1006
1007 if ((y + h) < (oc->pos_y + outh))
1008 y2 = oc->pos_y + outh;
1009 else
1010 y2 = y + h;
1011
1012 x = x1;
1013 y = y1;
1014 w = x2 - x1;
1015 h = y2 - y1;
1016
1017 DSSDBG("changing upd area due to ovl(%d) scaling %d,%d %dx%d\n",
1018 i, x, y, w, h);
1019 }
1020
1021 mc = &dss_cache.manager_cache[mgr->id];
1022 mc->do_manual_update = true;
1023 mc->x = x;
1024 mc->y = y;
1025 mc->w = w;
1026 mc->h = h;
1027
1028 configure_dispc();
1029
1030 mc->do_manual_update = false;
1031
1032 spin_unlock_irqrestore(&dss_cache.lock, flags);
1033
1034 *xi = x;
1035 *yi = y;
1036 *wi = w;
1037 *hi = h;
1038}
1039
1040void dss_start_update(struct omap_dss_device *dssdev)
1041{
1042 struct manager_cache_data *mc;
1043 struct overlay_cache_data *oc;
1044 const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
1045 const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
1046 struct omap_overlay_manager *mgr;
1047 int i;
1048
1049 mgr = dssdev->manager;
1050
1051 for (i = 0; i < num_ovls; ++i) {
1052 oc = &dss_cache.overlay_cache[i];
1053 if (oc->channel != mgr->id)
1054 continue;
1055
1056 oc->shadow_dirty = false;
1057 }
1058
1059 for (i = 0; i < num_mgrs; ++i) {
1060 mc = &dss_cache.manager_cache[i];
1061 if (mgr->id != i)
1062 continue;
1063
1064 mc->shadow_dirty = false;
1065 }
1066
1067 dispc_enable_lcd_out(1);
1068}
1069
1070static void dss_apply_irq_handler(void *data, u32 mask)
1071{
1072 struct manager_cache_data *mc;
1073 struct overlay_cache_data *oc;
1074 const int num_ovls = ARRAY_SIZE(dss_cache.overlay_cache);
1075 const int num_mgrs = ARRAY_SIZE(dss_cache.manager_cache);
1076 int i, r;
1077 bool mgr_busy[2];
1078
1079 mgr_busy[0] = dispc_go_busy(0);
1080 mgr_busy[1] = dispc_go_busy(1);
1081
1082 spin_lock(&dss_cache.lock);
1083
1084 for (i = 0; i < num_ovls; ++i) {
1085 oc = &dss_cache.overlay_cache[i];
1086 if (!mgr_busy[oc->channel])
1087 oc->shadow_dirty = false;
1088 }
1089
1090 for (i = 0; i < num_mgrs; ++i) {
1091 mc = &dss_cache.manager_cache[i];
1092 if (!mgr_busy[i])
1093 mc->shadow_dirty = false;
1094 }
1095
1096 r = configure_dispc();
1097 if (r == 1)
1098 goto end;
1099
1100 /* re-read busy flags */
1101 mgr_busy[0] = dispc_go_busy(0);
1102 mgr_busy[1] = dispc_go_busy(1);
1103
1104 /* keep running as long as there are busy managers, so that
1105 * we can collect overlay-applied information */
1106 for (i = 0; i < num_mgrs; ++i) {
1107 if (mgr_busy[i])
1108 goto end;
1109 }
1110
1111 omap_dispc_unregister_isr(dss_apply_irq_handler, NULL,
1112 DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
1113 DISPC_IRQ_EVSYNC_EVEN);
1114 dss_cache.irq_enabled = false;
1115
1116end:
1117 spin_unlock(&dss_cache.lock);
1118}
1119
1120static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
1121{
1122 struct overlay_cache_data *oc;
1123 struct manager_cache_data *mc;
1124 int i;
1125 struct omap_overlay *ovl;
1126 int num_planes_enabled = 0;
1127 bool use_fifomerge;
1128 unsigned long flags;
1129 int r;
1130
1131 DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
1132
1133 spin_lock_irqsave(&dss_cache.lock, flags);
1134
1135 /* Configure overlays */
1136 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
1137 struct omap_dss_device *dssdev;
1138
1139 ovl = omap_dss_get_overlay(i);
1140
1141 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
1142 continue;
1143
1144 oc = &dss_cache.overlay_cache[ovl->id];
1145
1146 if (!overlay_enabled(ovl)) {
1147 if (oc->enabled) {
1148 oc->enabled = false;
1149 oc->dirty = true;
1150 }
1151 continue;
1152 }
1153
1154 if (!ovl->info_dirty) {
1155 if (oc->enabled)
1156 ++num_planes_enabled;
1157 continue;
1158 }
1159
1160 dssdev = ovl->manager->device;
1161
1162 if (dss_check_overlay(ovl, dssdev)) {
1163 if (oc->enabled) {
1164 oc->enabled = false;
1165 oc->dirty = true;
1166 }
1167 continue;
1168 }
1169
1170 ovl->info_dirty = false;
1171 oc->dirty = true;
1172
1173 oc->paddr = ovl->info.paddr;
1174 oc->vaddr = ovl->info.vaddr;
1175 oc->screen_width = ovl->info.screen_width;
1176 oc->width = ovl->info.width;
1177 oc->height = ovl->info.height;
1178 oc->color_mode = ovl->info.color_mode;
1179 oc->rotation = ovl->info.rotation;
1180 oc->rotation_type = ovl->info.rotation_type;
1181 oc->mirror = ovl->info.mirror;
1182 oc->pos_x = ovl->info.pos_x;
1183 oc->pos_y = ovl->info.pos_y;
1184 oc->out_width = ovl->info.out_width;
1185 oc->out_height = ovl->info.out_height;
1186 oc->global_alpha = ovl->info.global_alpha;
1187
1188 oc->replication =
1189 dss_use_replication(dssdev, ovl->info.color_mode);
1190
1191 oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
1192
1193 oc->channel = ovl->manager->id;
1194
1195 oc->enabled = true;
1196
1197 oc->manual_update =
1198 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
1199 dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
1200
1201 ++num_planes_enabled;
1202 }
1203
1204 /* Configure managers */
1205 list_for_each_entry(mgr, &manager_list, list) {
1206 struct omap_dss_device *dssdev;
1207
1208 if (!(mgr->caps & OMAP_DSS_OVL_MGR_CAP_DISPC))
1209 continue;
1210
1211 mc = &dss_cache.manager_cache[mgr->id];
1212
1213 if (mgr->device_changed) {
1214 mgr->device_changed = false;
1215 mgr->info_dirty = true;
1216 }
1217
1218 if (!mgr->info_dirty)
1219 continue;
1220
1221 if (!mgr->device)
1222 continue;
1223
1224 dssdev = mgr->device;
1225
1226 mgr->info_dirty = false;
1227 mc->dirty = true;
1228
1229 mc->default_color = mgr->info.default_color;
1230 mc->trans_key_type = mgr->info.trans_key_type;
1231 mc->trans_key = mgr->info.trans_key;
1232 mc->trans_enabled = mgr->info.trans_enabled;
1233 mc->alpha_enabled = mgr->info.alpha_enabled;
1234
1235 mc->manual_upd_display =
1236 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
1237
1238 mc->manual_update =
1239 dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
1240 dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
1241 }
1242
1243 /* XXX TODO: Try to get fifomerge working. The problem is that it
1244 * affects both managers at the same time, not individually. This
1245 * means the change has to be well synchronized. I guess the proper way
1246 * is to have a two step process for fifo merge:
1247 * fifomerge enable:
1248 * 1. disable other planes, leaving one plane enabled
1249 * 2. wait until the planes are disabled on HW
1250 * 3. config merged fifo thresholds, enable fifomerge
1251 * fifomerge disable:
1252 * 1. config unmerged fifo thresholds, disable fifomerge
1253 * 2. wait until fifo changes are in HW
1254 * 3. enable planes
1255 */
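	/* A possible (untested) sketch of the enable half, assuming a
	 * hypothetical wait_for_planes_disabled() helper that blocks until
	 * the HW has taken the plane disables into use:
	 *
	 *	dispc_enable_plane(other1, 0);
	 *	dispc_enable_plane(other2, 0);
	 *	wait_for_planes_disabled();
	 *	dispc_setup_plane_fifo(kept, fifo_low, fifo_high);
	 *	dispc_enable_fifomerge(true);
	 */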
1256 use_fifomerge = false;
1257
1258 /* Configure overlay fifos */
1259 for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
1260 struct omap_dss_device *dssdev;
1261 u32 size;
1262
1263 ovl = omap_dss_get_overlay(i);
1264
1265 if (!(ovl->caps & OMAP_DSS_OVL_CAP_DISPC))
1266 continue;
1267
1268 oc = &dss_cache.overlay_cache[ovl->id];
1269
1270 if (!oc->enabled)
1271 continue;
1272
1273 dssdev = ovl->manager->device;
1274
1275 size = dispc_get_plane_fifo_size(ovl->id);
1276 if (use_fifomerge)
1277 size *= 3;
1278
1279 switch (dssdev->type) {
1280 case OMAP_DISPLAY_TYPE_DPI:
1281 case OMAP_DISPLAY_TYPE_DBI:
1282 case OMAP_DISPLAY_TYPE_SDI:
1283 case OMAP_DISPLAY_TYPE_VENC:
1284 default_get_overlay_fifo_thresholds(ovl->id, size,
1285 &oc->burst_size, &oc->fifo_low,
1286 &oc->fifo_high);
1287 break;
1288#ifdef CONFIG_OMAP2_DSS_DSI
1289 case OMAP_DISPLAY_TYPE_DSI:
1290 dsi_get_overlay_fifo_thresholds(ovl->id, size,
1291 &oc->burst_size, &oc->fifo_low,
1292 &oc->fifo_high);
1293 break;
1294#endif
1295 default:
1296 BUG();
1297 }
1298 }
1299
1300 r = 0;
1301 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
1302 if (!dss_cache.irq_enabled) {
1303 r = omap_dispc_register_isr(dss_apply_irq_handler, NULL,
1304 DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
1305 DISPC_IRQ_EVSYNC_EVEN);
1306 dss_cache.irq_enabled = true;
1307 }
1308 configure_dispc();
1309 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
1310
1311 spin_unlock_irqrestore(&dss_cache.lock, flags);
1312
1313 return r;
1314}
1315
1316static int dss_check_manager(struct omap_overlay_manager *mgr)
1317{
1318 /* OMAP supports only graphics source transparency color key and alpha
1319 * blending simultaneously. See TRM 15.4.2.4.2.2 Alpha Mode */
1320
1321 if (mgr->info.alpha_enabled && mgr->info.trans_enabled &&
1322 mgr->info.trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST)
1323 return -EINVAL;
1324
1325 return 0;
1326}
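/* For example, alpha blending combined with the "video-source" transparency
 * key type is rejected above, while alpha blending combined with the
 * "gfx-destination" key type is accepted. */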
1327
1328static int omap_dss_mgr_set_info(struct omap_overlay_manager *mgr,
1329 struct omap_overlay_manager_info *info)
1330{
1331 int r;
1332 struct omap_overlay_manager_info old_info;
1333
1334 old_info = mgr->info;
1335 mgr->info = *info;
1336
1337 r = dss_check_manager(mgr);
1338 if (r) {
1339 mgr->info = old_info;
1340 return r;
1341 }
1342
1343 mgr->info_dirty = true;
1344
1345 return 0;
1346}
1347
1348static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
1349 struct omap_overlay_manager_info *info)
1350{
1351 *info = mgr->info;
1352}
1353
1354static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager)
1355{
1356 ++num_managers;
1357 list_add_tail(&manager->list, &manager_list);
1358}
1359
1360int dss_init_overlay_managers(struct platform_device *pdev)
1361{
1362 int i, r;
1363
1364 spin_lock_init(&dss_cache.lock);
1365
1366 INIT_LIST_HEAD(&manager_list);
1367
1368 num_managers = 0;
1369
1370 for (i = 0; i < 2; ++i) {
1371 struct omap_overlay_manager *mgr;
1372 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
1373
1374 BUG_ON(mgr == NULL);
1375
1376 switch (i) {
1377 case 0:
1378 mgr->name = "lcd";
1379 mgr->id = OMAP_DSS_CHANNEL_LCD;
1380 mgr->supported_displays =
1381 OMAP_DISPLAY_TYPE_DPI | OMAP_DISPLAY_TYPE_DBI |
1382 OMAP_DISPLAY_TYPE_SDI | OMAP_DISPLAY_TYPE_DSI;
1383 break;
1384 case 1:
1385 mgr->name = "tv";
1386 mgr->id = OMAP_DSS_CHANNEL_DIGIT;
1387 mgr->supported_displays = OMAP_DISPLAY_TYPE_VENC;
1388 break;
1389 }
1390
1391 mgr->set_device = &omap_dss_set_device;
1392 mgr->unset_device = &omap_dss_unset_device;
1393 mgr->apply = &omap_dss_mgr_apply;
1394 mgr->set_manager_info = &omap_dss_mgr_set_info;
1395 mgr->get_manager_info = &omap_dss_mgr_get_info;
1396 mgr->wait_for_go = &dss_mgr_wait_for_go;
1397
1398 mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC;
1399
1400 dss_overlay_setup_dispc_manager(mgr);
1401
1402 omap_dss_add_overlay_manager(mgr);
1403
1404 r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
1405 &pdev->dev.kobj, "manager%d", i);
1406
1407 if (r) {
1408 DSSERR("failed to create sysfs file\n");
1409 continue;
1410 }
1411 }
1412
1413#ifdef L4_EXAMPLE
1414 {
1415 int omap_dss_mgr_apply_l4(struct omap_overlay_manager *mgr)
1416 {
1417 DSSDBG("omap_dss_mgr_apply_l4(%s)\n", mgr->name);
1418
1419 return 0;
1420 }
1421
1422 struct omap_overlay_manager *mgr;
1423 mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
1424
1425 BUG_ON(mgr == NULL);
1426
1427 mgr->name = "l4";
1428 mgr->supported_displays =
1429 OMAP_DISPLAY_TYPE_DBI | OMAP_DISPLAY_TYPE_DSI;
1430
1431 mgr->set_device = &omap_dss_set_device;
1432 mgr->unset_device = &omap_dss_unset_device;
1433 mgr->apply = &omap_dss_mgr_apply_l4;
1434 mgr->set_manager_info = &omap_dss_mgr_set_info;
1435 mgr->get_manager_info = &omap_dss_mgr_get_info;
1436
1437 dss_overlay_setup_l4_manager(mgr);
1438
1439 omap_dss_add_overlay_manager(mgr);
1440
1441 r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
1442 &pdev->dev.kobj, "managerl4");
1443
1444 if (r)
1445 DSSERR("failed to create sysfs file\n");
1446 }
1447#endif
1448
1449 return 0;
1450}
1451
1452void dss_uninit_overlay_managers(struct platform_device *pdev)
1453{
1454 struct omap_overlay_manager *mgr;
1455
1456 while (!list_empty(&manager_list)) {
1457 mgr = list_first_entry(&manager_list,
1458 struct omap_overlay_manager, list);
1459 list_del(&mgr->list);
1460 kobject_del(&mgr->kobj);
1461 kobject_put(&mgr->kobj);
1462 kfree(mgr);
1463 }
1464
1465 num_managers = 0;
1466}
1467
1468int omap_dss_get_num_overlay_managers(void)
1469{
1470 return num_managers;
1471}
1472EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
1473
1474struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
1475{
1476 int i = 0;
1477 struct omap_overlay_manager *mgr;
1478
1479 list_for_each_entry(mgr, &manager_list, list) {
1480 if (i++ == num)
1481 return mgr;
1482 }
1483
1484 return NULL;
1485}
1486EXPORT_SYMBOL(omap_dss_get_overlay_manager);
1487
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
new file mode 100644
index 000000000000..b7f9a7339842
--- /dev/null
+++ b/drivers/video/omap2/dss/overlay.c
@@ -0,0 +1,680 @@
1/*
2 * linux/drivers/video/omap2/dss/overlay.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "OVERLAY"
24
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/err.h>
28#include <linux/sysfs.h>
29#include <linux/kobject.h>
30#include <linux/platform_device.h>
31#include <linux/delay.h>
32
33#include <plat/display.h>
34#include <plat/cpu.h>
35
36#include "dss.h"
37
38static int num_overlays;
39static struct list_head overlay_list;
40
41static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
42{
43 return snprintf(buf, PAGE_SIZE, "%s\n", ovl->name);
44}
45
46static ssize_t overlay_manager_show(struct omap_overlay *ovl, char *buf)
47{
48 return snprintf(buf, PAGE_SIZE, "%s\n",
49 ovl->manager ? ovl->manager->name : "<none>");
50}
51
52static ssize_t overlay_manager_store(struct omap_overlay *ovl, const char *buf,
53 size_t size)
54{
55 int i, r;
56 struct omap_overlay_manager *mgr = NULL;
57 struct omap_overlay_manager *old_mgr;
58 int len = size;
59
60 if (buf[size-1] == '\n')
61 --len;
62
63 if (len > 0) {
64 for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
65 mgr = omap_dss_get_overlay_manager(i);
66
67 if (strncmp(buf, mgr->name, len) == 0 && mgr->name[len] == '\0')
68 break;
69
70 mgr = NULL;
71 }
72 }
73
74 if (len > 0 && mgr == NULL)
75 return -EINVAL;
76
77 if (mgr)
78 DSSDBG("manager %s found\n", mgr->name);
79
80 if (mgr == ovl->manager)
81 return size;
82
83 old_mgr = ovl->manager;
84
85 /* detach old manager */
86 if (old_mgr) {
87 r = ovl->unset_manager(ovl);
88 if (r) {
89 DSSERR("detach failed\n");
90 return r;
91 }
92
93 r = old_mgr->apply(old_mgr);
94 if (r)
95 return r;
96 }
97
98 if (mgr) {
99 r = ovl->set_manager(ovl, mgr);
100 if (r) {
101 DSSERR("Failed to attach overlay\n");
102 return r;
103 }
104
105 r = mgr->apply(mgr);
106 if (r)
107 return r;
108 }
109
110 return size;
111}
112
113static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
114{
115 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
116 ovl->info.width, ovl->info.height);
117}
118
119static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
120{
121 return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width);
122}
123
124static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
125{
126 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
127 ovl->info.pos_x, ovl->info.pos_y);
128}
129
130static ssize_t overlay_position_store(struct omap_overlay *ovl,
131 const char *buf, size_t size)
132{
133 int r;
134 char *last;
135 struct omap_overlay_info info;
136
137 ovl->get_overlay_info(ovl, &info);
138
139 info.pos_x = simple_strtoul(buf, &last, 10);
140 ++last;
141 if (last - buf >= size)
142 return -EINVAL;
143
144 info.pos_y = simple_strtoul(last, &last, 10);
145
146 r = ovl->set_overlay_info(ovl, &info);
147 if (r)
148 return r;
149
150 if (ovl->manager) {
151 r = ovl->manager->apply(ovl->manager);
152 if (r)
153 return r;
154 }
155
156 return size;
157}
158
159static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
160{
161 return snprintf(buf, PAGE_SIZE, "%d,%d\n",
162 ovl->info.out_width, ovl->info.out_height);
163}
164
165static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
166 const char *buf, size_t size)
167{
168 int r;
169 char *last;
170 struct omap_overlay_info info;
171
172 ovl->get_overlay_info(ovl, &info);
173
174 info.out_width = simple_strtoul(buf, &last, 10);
175 ++last;
176 if (last - buf >= size)
177 return -EINVAL;
178
179 info.out_height = simple_strtoul(last, &last, 10);
180
181 r = ovl->set_overlay_info(ovl, &info);
182 if (r)
183 return r;
184
185 if (ovl->manager) {
186 r = ovl->manager->apply(ovl->manager);
187 if (r)
188 return r;
189 }
190
191 return size;
192}
193
194static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
195{
196 return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled);
197}
198
199static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
200 size_t size)
201{
202 int r;
203 struct omap_overlay_info info;
204
205 ovl->get_overlay_info(ovl, &info);
206
207 info.enabled = simple_strtoul(buf, NULL, 10);
208
209 r = ovl->set_overlay_info(ovl, &info);
210 if (r)
211 return r;
212
213 if (ovl->manager) {
214 r = ovl->manager->apply(ovl->manager);
215 if (r)
216 return r;
217 }
218
219 return size;
220}
221
222static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
223{
224 return snprintf(buf, PAGE_SIZE, "%d\n",
225 ovl->info.global_alpha);
226}
227
228static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
229 const char *buf, size_t size)
230{
231 int r;
232 struct omap_overlay_info info;
233
234 ovl->get_overlay_info(ovl, &info);
235
236 /* The video1 plane does not support global alpha,
237 * so force it to 255, i.e. completely opaque
238 */
239 if (ovl->id == OMAP_DSS_VIDEO1)
240 info.global_alpha = 255;
241 else
242 info.global_alpha = simple_strtoul(buf, NULL, 10);
243
244 r = ovl->set_overlay_info(ovl, &info);
245 if (r)
246 return r;
247
248 if (ovl->manager) {
249 r = ovl->manager->apply(ovl->manager);
250 if (r)
251 return r;
252 }
253
254 return size;
255}
256
257struct overlay_attribute {
258 struct attribute attr;
259 ssize_t (*show)(struct omap_overlay *, char *);
260 ssize_t (*store)(struct omap_overlay *, const char *, size_t);
261};
262
263#define OVERLAY_ATTR(_name, _mode, _show, _store) \
264 struct overlay_attribute overlay_attr_##_name = \
265 __ATTR(_name, _mode, _show, _store)
266
267static OVERLAY_ATTR(name, S_IRUGO, overlay_name_show, NULL);
268static OVERLAY_ATTR(manager, S_IRUGO|S_IWUSR,
269 overlay_manager_show, overlay_manager_store);
270static OVERLAY_ATTR(input_size, S_IRUGO, overlay_input_size_show, NULL);
271static OVERLAY_ATTR(screen_width, S_IRUGO, overlay_screen_width_show, NULL);
272static OVERLAY_ATTR(position, S_IRUGO|S_IWUSR,
273 overlay_position_show, overlay_position_store);
274static OVERLAY_ATTR(output_size, S_IRUGO|S_IWUSR,
275 overlay_output_size_show, overlay_output_size_store);
276static OVERLAY_ATTR(enabled, S_IRUGO|S_IWUSR,
277 overlay_enabled_show, overlay_enabled_store);
278static OVERLAY_ATTR(global_alpha, S_IRUGO|S_IWUSR,
279 overlay_global_alpha_show, overlay_global_alpha_store);
280
281static struct attribute *overlay_sysfs_attrs[] = {
282 &overlay_attr_name.attr,
283 &overlay_attr_manager.attr,
284 &overlay_attr_input_size.attr,
285 &overlay_attr_screen_width.attr,
286 &overlay_attr_position.attr,
287 &overlay_attr_output_size.attr,
288 &overlay_attr_enabled.attr,
289 &overlay_attr_global_alpha.attr,
290 NULL
291};
292
293static ssize_t overlay_attr_show(struct kobject *kobj, struct attribute *attr,
294 char *buf)
295{
296 struct omap_overlay *overlay;
297 struct overlay_attribute *overlay_attr;
298
299 overlay = container_of(kobj, struct omap_overlay, kobj);
300 overlay_attr = container_of(attr, struct overlay_attribute, attr);
301
302 if (!overlay_attr->show)
303 return -ENOENT;
304
305 return overlay_attr->show(overlay, buf);
306}
307
308static ssize_t overlay_attr_store(struct kobject *kobj, struct attribute *attr,
309 const char *buf, size_t size)
310{
311 struct omap_overlay *overlay;
312 struct overlay_attribute *overlay_attr;
313
314 overlay = container_of(kobj, struct omap_overlay, kobj);
315 overlay_attr = container_of(attr, struct overlay_attribute, attr);
316
317 if (!overlay_attr->store)
318 return -ENOENT;
319
320 return overlay_attr->store(overlay, buf, size);
321}
322
323static struct sysfs_ops overlay_sysfs_ops = {
324 .show = overlay_attr_show,
325 .store = overlay_attr_store,
326};
327
328static struct kobj_type overlay_ktype = {
329 .sysfs_ops = &overlay_sysfs_ops,
330 .default_attrs = overlay_sysfs_attrs,
331};
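/*
 * Example of driving an overlay from userspace (hypothetical sysfs paths;
 * the exact location depends on how the omapdss platform device is named):
 *
 *	# echo "lcd" > /sys/devices/platform/omapdss/overlay1/manager
 *	# echo "10,20" > /sys/devices/platform/omapdss/overlay1/position
 *	# echo 1 > /sys/devices/platform/omapdss/overlay1/enabled
 */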
332
333/* Check if overlay parameters are compatible with display */
334int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
335{
336 struct omap_overlay_info *info;
337 u16 outw, outh;
338 u16 dw, dh;
339
340 if (!dssdev)
341 return 0;
342
343 if (!ovl->info.enabled)
344 return 0;
345
346 info = &ovl->info;
347
348 if (info->paddr == 0) {
349 DSSDBG("check_overlay failed: paddr 0\n");
350 return -EINVAL;
351 }
352
353 dssdev->get_resolution(dssdev, &dw, &dh);
354
355 DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n",
356 ovl->id,
357 info->pos_x, info->pos_y,
358 info->width, info->height,
359 info->out_width, info->out_height,
360 dw, dh);
361
362 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
363 outw = info->width;
364 outh = info->height;
365 } else {
366 if (info->out_width == 0)
367 outw = info->width;
368 else
369 outw = info->out_width;
370
371 if (info->out_height == 0)
372 outh = info->height;
373 else
374 outh = info->out_height;
375 }
376
377 if (dw < info->pos_x + outw) {
378 DSSDBG("check_overlay failed 1: %d < %d + %d\n",
379 dw, info->pos_x, outw);
380 return -EINVAL;
381 }
382
383 if (dh < info->pos_y + outh) {
384 DSSDBG("check_overlay failed 2: %d < %d + %d\n",
385 dh, info->pos_y, outh);
386 return -EINVAL;
387 }
388
389 if ((ovl->supported_modes & info->color_mode) == 0) {
390 DSSERR("overlay doesn't support mode %d\n", info->color_mode);
391 return -EINVAL;
392 }
393
394 return 0;
395}
396
397static int dss_ovl_set_overlay_info(struct omap_overlay *ovl,
398 struct omap_overlay_info *info)
399{
400 int r;
401 struct omap_overlay_info old_info;
402
403 old_info = ovl->info;
404 ovl->info = *info;
405
406 if (ovl->manager) {
407 r = dss_check_overlay(ovl, ovl->manager->device);
408 if (r) {
409 ovl->info = old_info;
410 return r;
411 }
412 }
413
414 ovl->info_dirty = true;
415
416 return 0;
417}
418
419static void dss_ovl_get_overlay_info(struct omap_overlay *ovl,
420 struct omap_overlay_info *info)
421{
422 *info = ovl->info;
423}
424
425static int dss_ovl_wait_for_go(struct omap_overlay *ovl)
426{
427 return dss_mgr_wait_for_go_ovl(ovl);
428}
429
430static int omap_dss_set_manager(struct omap_overlay *ovl,
431 struct omap_overlay_manager *mgr)
432{
433 if (!mgr)
434 return -EINVAL;
435
436 if (ovl->manager) {
437 DSSERR("overlay '%s' already has a manager '%s'\n",
438 ovl->name, ovl->manager->name);
439 return -EINVAL;
440 }
441
442 if (ovl->info.enabled) {
443 DSSERR("overlay has to be disabled to change the manager\n");
444 return -EINVAL;
445 }
446
447 ovl->manager = mgr;
448
449 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
450 /* XXX: on manual update display, in auto update mode, a bug happens
451 * here. When an overlay is first enabled on LCD, then it's disabled,
452 * and the manager is changed to TV, we sometimes get SYNC_LOST_DIGIT
453 * errors. Waiting before changing the channel_out fixes it. I'm
454 * guessing that the overlay is still somehow being used for the LCD,
455 * but I don't understand how or why. */
456 msleep(40);
457 dispc_set_channel_out(ovl->id, mgr->id);
458 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
459
460 return 0;
461}
462
463static int omap_dss_unset_manager(struct omap_overlay *ovl)
464{
465 int r;
466
467 if (!ovl->manager) {
468 DSSERR("failed to detach overlay: manager not set\n");
469 return -EINVAL;
470 }
471
472 if (ovl->info.enabled) {
473 DSSERR("overlay has to be disabled to unset the manager\n");
474 return -EINVAL;
475 }
476
477 r = ovl->wait_for_go(ovl);
478 if (r)
479 return r;
480
481 ovl->manager = NULL;
482
483 return 0;
484}
485
486int omap_dss_get_num_overlays(void)
487{
488 return num_overlays;
489}
490EXPORT_SYMBOL(omap_dss_get_num_overlays);
491
492struct omap_overlay *omap_dss_get_overlay(int num)
493{
494 int i = 0;
495 struct omap_overlay *ovl;
496
497 list_for_each_entry(ovl, &overlay_list, list) {
498 if (i++ == num)
499 return ovl;
500 }
501
502 return NULL;
503}
504EXPORT_SYMBOL(omap_dss_get_overlay);
505
506static void omap_dss_add_overlay(struct omap_overlay *overlay)
507{
508 ++num_overlays;
509 list_add_tail(&overlay->list, &overlay_list);
510}
511
512static struct omap_overlay *dispc_overlays[3];
513
514void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
515{
516 mgr->num_overlays = 3;
517 mgr->overlays = dispc_overlays;
518}
519
520#ifdef L4_EXAMPLE
521static struct omap_overlay *l4_overlays[1];
522void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr)
523{
524 mgr->num_overlays = 1;
525 mgr->overlays = l4_overlays;
526}
527#endif
528
529void dss_init_overlays(struct platform_device *pdev)
530{
531 int i, r;
532
533 INIT_LIST_HEAD(&overlay_list);
534
535 num_overlays = 0;
536
537 for (i = 0; i < 3; ++i) {
538 struct omap_overlay *ovl;
539 ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
540
541 BUG_ON(ovl == NULL);
542
543 switch (i) {
544 case 0:
545 ovl->name = "gfx";
546 ovl->id = OMAP_DSS_GFX;
547 ovl->supported_modes = cpu_is_omap34xx() ?
548 OMAP_DSS_COLOR_GFX_OMAP3 :
549 OMAP_DSS_COLOR_GFX_OMAP2;
550 ovl->caps = OMAP_DSS_OVL_CAP_DISPC;
551 ovl->info.global_alpha = 255;
552 break;
553 case 1:
554 ovl->name = "vid1";
555 ovl->id = OMAP_DSS_VIDEO1;
556 ovl->supported_modes = cpu_is_omap34xx() ?
557 OMAP_DSS_COLOR_VID1_OMAP3 :
558 OMAP_DSS_COLOR_VID_OMAP2;
559 ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
560 OMAP_DSS_OVL_CAP_DISPC;
561 ovl->info.global_alpha = 255;
562 break;
563 case 2:
564 ovl->name = "vid2";
565 ovl->id = OMAP_DSS_VIDEO2;
566 ovl->supported_modes = cpu_is_omap34xx() ?
567 OMAP_DSS_COLOR_VID2_OMAP3 :
568 OMAP_DSS_COLOR_VID_OMAP2;
569 ovl->caps = OMAP_DSS_OVL_CAP_SCALE |
570 OMAP_DSS_OVL_CAP_DISPC;
571 ovl->info.global_alpha = 255;
572 break;
573 }
574
575 ovl->set_manager = &omap_dss_set_manager;
576 ovl->unset_manager = &omap_dss_unset_manager;
577 ovl->set_overlay_info = &dss_ovl_set_overlay_info;
578 ovl->get_overlay_info = &dss_ovl_get_overlay_info;
579 ovl->wait_for_go = &dss_ovl_wait_for_go;
580
581 omap_dss_add_overlay(ovl);
582
583 r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
584 &pdev->dev.kobj, "overlay%d", i);
585
586 if (r) {
587 DSSERR("failed to create sysfs file\n");
588 continue;
589 }
590
591 dispc_overlays[i] = ovl;
592 }
593
594#ifdef L4_EXAMPLE
595 {
596 struct omap_overlay *ovl;
597 ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
598
599 BUG_ON(ovl == NULL);
600
601 ovl->name = "l4";
602 ovl->supported_modes = OMAP_DSS_COLOR_RGB24U;
603
604 ovl->set_manager = &omap_dss_set_manager;
605 ovl->unset_manager = &omap_dss_unset_manager;
606 ovl->set_overlay_info = &dss_ovl_set_overlay_info;
607 ovl->get_overlay_info = &dss_ovl_get_overlay_info;
608
609 omap_dss_add_overlay(ovl);
610
611 r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
612 &pdev->dev.kobj, "overlayl4");
613
614 if (r)
615 DSSERR("failed to create sysfs file\n");
616
617 l4_overlays[0] = ovl;
618 }
619#endif
620}
621
622/* Connect overlays to the new device, if not already connected. If force
623 * is set, always connect. */
624void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
625{
626 int i;
627 struct omap_overlay_manager *lcd_mgr;
628 struct omap_overlay_manager *tv_mgr;
629 struct omap_overlay_manager *mgr = NULL;
630
631 lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD);
632 tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV);
633
634 if (dssdev->type != OMAP_DISPLAY_TYPE_VENC) {
635 if (!lcd_mgr->device || force) {
636 if (lcd_mgr->device)
637 lcd_mgr->unset_device(lcd_mgr);
638 lcd_mgr->set_device(lcd_mgr, dssdev);
639 mgr = lcd_mgr;
640 }
641 }
642
643 if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
644 if (!tv_mgr->device || force) {
645 if (tv_mgr->device)
646 tv_mgr->unset_device(tv_mgr);
647 tv_mgr->set_device(tv_mgr, dssdev);
648 mgr = tv_mgr;
649 }
650 }
651
652 if (mgr) {
653 for (i = 0; i < 3; i++) {
654 struct omap_overlay *ovl;
655 ovl = omap_dss_get_overlay(i);
656 if (!ovl->manager || force) {
657 if (ovl->manager)
658 omap_dss_unset_manager(ovl);
659 omap_dss_set_manager(ovl, mgr);
660 }
661 }
662 }
663}
664
665void dss_uninit_overlays(struct platform_device *pdev)
666{
667 struct omap_overlay *ovl;
668
669 while (!list_empty(&overlay_list)) {
670 ovl = list_first_entry(&overlay_list,
671 struct omap_overlay, list);
672 list_del(&ovl->list);
673 kobject_del(&ovl->kobj);
674 kobject_put(&ovl->kobj);
675 kfree(ovl);
676 }
677
678 num_overlays = 0;
679}
680
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
new file mode 100644
index 000000000000..d0b3006ad8a5
--- /dev/null
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -0,0 +1,1309 @@
1/*
2 * linux/drivers/video/omap2/dss/rfbi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#define DSS_SUBSYS_NAME "RFBI"
24
25#include <linux/kernel.h>
26#include <linux/dma-mapping.h>
27#include <linux/vmalloc.h>
28#include <linux/clk.h>
29#include <linux/io.h>
30#include <linux/delay.h>
31#include <linux/kfifo.h>
32#include <linux/ktime.h>
33#include <linux/hrtimer.h>
34#include <linux/seq_file.h>
35
36#include <plat/display.h>
37#include "dss.h"
38
39/*#define MEASURE_PERF*/
40
41#define RFBI_BASE 0x48050800
42
43struct rfbi_reg { u16 idx; };
44
45#define RFBI_REG(idx) ((const struct rfbi_reg) { idx })
46
47#define RFBI_REVISION RFBI_REG(0x0000)
48#define RFBI_SYSCONFIG RFBI_REG(0x0010)
49#define RFBI_SYSSTATUS RFBI_REG(0x0014)
50#define RFBI_CONTROL RFBI_REG(0x0040)
51#define RFBI_PIXEL_CNT RFBI_REG(0x0044)
52#define RFBI_LINE_NUMBER RFBI_REG(0x0048)
53#define RFBI_CMD RFBI_REG(0x004c)
54#define RFBI_PARAM RFBI_REG(0x0050)
55#define RFBI_DATA RFBI_REG(0x0054)
56#define RFBI_READ RFBI_REG(0x0058)
57#define RFBI_STATUS RFBI_REG(0x005c)
58
59#define RFBI_CONFIG(n) RFBI_REG(0x0060 + (n)*0x18)
60#define RFBI_ONOFF_TIME(n) RFBI_REG(0x0064 + (n)*0x18)
61#define RFBI_CYCLE_TIME(n) RFBI_REG(0x0068 + (n)*0x18)
62#define RFBI_DATA_CYCLE1(n) RFBI_REG(0x006c + (n)*0x18)
63#define RFBI_DATA_CYCLE2(n) RFBI_REG(0x0070 + (n)*0x18)
64#define RFBI_DATA_CYCLE3(n) RFBI_REG(0x0074 + (n)*0x18)
65
66#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090)
67#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094)
68
69#define RFBI_CMD_FIFO_LEN_BYTES (16 * sizeof(struct update_param))
70
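/*
 * Read-modify-write helper: fetch the register, replace the bit field
 * start..end with val (FLD_MOD comes from dss.h) and write it back.
 */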
71#define REG_FLD_MOD(idx, val, start, end) \
72 rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
73
74/* To work around an RFBI transfer rate limitation */
75#define OMAP_RFBI_RATE_LIMIT 1
76
77enum omap_rfbi_cycleformat {
78 OMAP_DSS_RFBI_CYCLEFORMAT_1_1 = 0,
79 OMAP_DSS_RFBI_CYCLEFORMAT_2_1 = 1,
80 OMAP_DSS_RFBI_CYCLEFORMAT_3_1 = 2,
81 OMAP_DSS_RFBI_CYCLEFORMAT_3_2 = 3,
82};
83
84enum omap_rfbi_datatype {
85 OMAP_DSS_RFBI_DATATYPE_12 = 0,
86 OMAP_DSS_RFBI_DATATYPE_16 = 1,
87 OMAP_DSS_RFBI_DATATYPE_18 = 2,
88 OMAP_DSS_RFBI_DATATYPE_24 = 3,
89};
90
91enum omap_rfbi_parallelmode {
92 OMAP_DSS_RFBI_PARALLELMODE_8 = 0,
93 OMAP_DSS_RFBI_PARALLELMODE_9 = 1,
94 OMAP_DSS_RFBI_PARALLELMODE_12 = 2,
95 OMAP_DSS_RFBI_PARALLELMODE_16 = 3,
96};
97
98enum update_cmd {
99 RFBI_CMD_UPDATE = 0,
100 RFBI_CMD_SYNC = 1,
101};
102
103static int rfbi_convert_timings(struct rfbi_timings *t);
104static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
105static void process_cmd_fifo(void);
106
107static struct {
108 void __iomem *base;
109
110 unsigned long l4_khz;
111
112 enum omap_rfbi_datatype datatype;
113 enum omap_rfbi_parallelmode parallelmode;
114
115 enum omap_rfbi_te_mode te_mode;
116 int te_enabled;
117
118 void (*framedone_callback)(void *data);
119 void *framedone_callback_data;
120
121 struct omap_dss_device *dssdev[2];
122
123 struct kfifo *cmd_fifo;
124 spinlock_t cmd_lock;
125 struct completion cmd_done;
126 atomic_t cmd_fifo_full;
127 atomic_t cmd_pending;
128#ifdef MEASURE_PERF
129 unsigned perf_bytes;
130 ktime_t perf_setup_time;
131 ktime_t perf_start_time;
132#endif
133} rfbi;
134
135struct update_region {
136 u16 x;
137 u16 y;
138 u16 w;
139 u16 h;
140};
141
142struct update_param {
143 u8 rfbi_module;
144 u8 cmd;
145
146 union {
147 struct update_region r;
148 struct completion *sync;
149 } par;
150};
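/*
 * Commands queued towards the RFBI: RFBI_CMD_UPDATE carries the region to
 * transfer, RFBI_CMD_SYNC carries a completion to signal once all earlier
 * commands have been consumed. process_cmd_fifo() drains the queue; an
 * asynchronous DISPC transfer pauses the drain until framedone_callback()
 * kicks it again.
 */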
151
152static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
153{
154 __raw_writel(val, rfbi.base + idx.idx);
155}
156
157static inline u32 rfbi_read_reg(const struct rfbi_reg idx)
158{
159 return __raw_readl(rfbi.base + idx.idx);
160}
161
162static void rfbi_enable_clocks(bool enable)
163{
164 if (enable)
165 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
166 else
167 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
168}
169
170void omap_rfbi_write_command(const void *buf, u32 len)
171{
172 rfbi_enable_clocks(1);
173 switch (rfbi.parallelmode) {
174 case OMAP_DSS_RFBI_PARALLELMODE_8:
175 {
176 const u8 *b = buf;
177 for (; len; len--)
178 rfbi_write_reg(RFBI_CMD, *b++);
179 break;
180 }
181
182 case OMAP_DSS_RFBI_PARALLELMODE_16:
183 {
184 const u16 *w = buf;
185 BUG_ON(len & 1);
186 for (; len; len -= 2)
187 rfbi_write_reg(RFBI_CMD, *w++);
188 break;
189 }
190
191 case OMAP_DSS_RFBI_PARALLELMODE_9:
192 case OMAP_DSS_RFBI_PARALLELMODE_12:
193 default:
194 BUG();
195 }
196 rfbi_enable_clocks(0);
197}
198EXPORT_SYMBOL(omap_rfbi_write_command);
199
200void omap_rfbi_read_data(void *buf, u32 len)
201{
202 rfbi_enable_clocks(1);
203 switch (rfbi.parallelmode) {
204 case OMAP_DSS_RFBI_PARALLELMODE_8:
205 {
206 u8 *b = buf;
207 for (; len; len--) {
208 rfbi_write_reg(RFBI_READ, 0);
209 *b++ = rfbi_read_reg(RFBI_READ);
210 }
211 break;
212 }
213
214 case OMAP_DSS_RFBI_PARALLELMODE_16:
215 {
216 u16 *w = buf;
217		BUG_ON(len & 1);	/* transfers must be an even number of bytes */
218 for (; len; len -= 2) {
219 rfbi_write_reg(RFBI_READ, 0);
220 *w++ = rfbi_read_reg(RFBI_READ);
221 }
222 break;
223 }
224
225 case OMAP_DSS_RFBI_PARALLELMODE_9:
226 case OMAP_DSS_RFBI_PARALLELMODE_12:
227 default:
228 BUG();
229 }
230 rfbi_enable_clocks(0);
231}
232EXPORT_SYMBOL(omap_rfbi_read_data);
233
234void omap_rfbi_write_data(const void *buf, u32 len)
235{
236 rfbi_enable_clocks(1);
237 switch (rfbi.parallelmode) {
238 case OMAP_DSS_RFBI_PARALLELMODE_8:
239 {
240 const u8 *b = buf;
241 for (; len; len--)
242 rfbi_write_reg(RFBI_PARAM, *b++);
243 break;
244 }
245
246 case OMAP_DSS_RFBI_PARALLELMODE_16:
247 {
248 const u16 *w = buf;
249 BUG_ON(len & 1);
250 for (; len; len -= 2)
251 rfbi_write_reg(RFBI_PARAM, *w++);
252 break;
253 }
254
255 case OMAP_DSS_RFBI_PARALLELMODE_9:
256 case OMAP_DSS_RFBI_PARALLELMODE_12:
257 default:
258 BUG();
259
260 }
261 rfbi_enable_clocks(0);
262}
263EXPORT_SYMBOL(omap_rfbi_write_data);
264
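/*
 * Push a rectangle of pixels from a (possibly larger) framebuffer out
 * through RFBI_PARAM. Supported combinations: 16bpp or 24bpp data on an
 * 8-bit bus (bytes go out high-order first; 24bpp pixels are stored one
 * per 32-bit word), and 16bpp data on a 16-bit bus. scr_width is the
 * framebuffer stride in pixels.
 */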
265void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
266 u16 x, u16 y,
267 u16 w, u16 h)
268{
269 int start_offset = scr_width * y + x;
270 int horiz_offset = scr_width - w;
271 int i;
272
273 rfbi_enable_clocks(1);
274
275 if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
276 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
277 const u16 __iomem *pd = buf;
278 pd += start_offset;
279
280 for (; h; --h) {
281 for (i = 0; i < w; ++i) {
282 const u8 __iomem *b = (const u8 __iomem *)pd;
283 rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
284 rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
285 ++pd;
286 }
287 pd += horiz_offset;
288 }
289 } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_24 &&
290 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_8) {
291 const u32 __iomem *pd = buf;
292 pd += start_offset;
293
294 for (; h; --h) {
295 for (i = 0; i < w; ++i) {
296 const u8 __iomem *b = (const u8 __iomem *)pd;
297 rfbi_write_reg(RFBI_PARAM, __raw_readb(b+2));
298 rfbi_write_reg(RFBI_PARAM, __raw_readb(b+1));
299 rfbi_write_reg(RFBI_PARAM, __raw_readb(b+0));
300 ++pd;
301 }
302 pd += horiz_offset;
303 }
304 } else if (rfbi.datatype == OMAP_DSS_RFBI_DATATYPE_16 &&
305 rfbi.parallelmode == OMAP_DSS_RFBI_PARALLELMODE_16) {
306 const u16 __iomem *pd = buf;
307 pd += start_offset;
308
309 for (; h; --h) {
310 for (i = 0; i < w; ++i) {
311 rfbi_write_reg(RFBI_PARAM, __raw_readw(pd));
312 ++pd;
313 }
314 pd += horiz_offset;
315 }
316 } else {
317 BUG();
318 }
319
320 rfbi_enable_clocks(0);
321}
322EXPORT_SYMBOL(omap_rfbi_write_pixels);
323
324#ifdef MEASURE_PERF
325static void perf_mark_setup(void)
326{
327 rfbi.perf_setup_time = ktime_get();
328}
329
330static void perf_mark_start(void)
331{
332 rfbi.perf_start_time = ktime_get();
333}
334
335static void perf_show(const char *name)
336{
337 ktime_t t, setup_time, trans_time;
338 u32 total_bytes;
339 u32 setup_us, trans_us, total_us;
340
341 t = ktime_get();
342
343 setup_time = ktime_sub(rfbi.perf_start_time, rfbi.perf_setup_time);
344 setup_us = (u32)ktime_to_us(setup_time);
345 if (setup_us == 0)
346 setup_us = 1;
347
348 trans_time = ktime_sub(t, rfbi.perf_start_time);
349 trans_us = (u32)ktime_to_us(trans_time);
350 if (trans_us == 0)
351 trans_us = 1;
352
353 total_us = setup_us + trans_us;
354
355 total_bytes = rfbi.perf_bytes;
356
357 DSSINFO("%s update %u us + %u us = %u us (%uHz), %u bytes, "
358 "%u kbytes/sec\n",
359 name,
360 setup_us,
361 trans_us,
362 total_us,
363 1000*1000 / total_us,
364 total_bytes,
365 total_bytes * 1000 / total_us);
366}
367#else
368#define perf_mark_setup()
369#define perf_mark_start()
370#define perf_show(x)
371#endif
372
373void rfbi_transfer_area(u16 width, u16 height,
374 void (callback)(void *data), void *data)
375{
376 u32 l;
377
378 /*BUG_ON(callback == 0);*/
379 BUG_ON(rfbi.framedone_callback != NULL);
380
381 DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
382
383 dispc_set_lcd_size(width, height);
384
385 dispc_enable_lcd_out(1);
386
387 rfbi.framedone_callback = callback;
388 rfbi.framedone_callback_data = data;
389
390 rfbi_enable_clocks(1);
391
392 rfbi_write_reg(RFBI_PIXEL_CNT, width * height);
393
394 l = rfbi_read_reg(RFBI_CONTROL);
395 l = FLD_MOD(l, 1, 0, 0); /* enable */
396 if (!rfbi.te_enabled)
397 l = FLD_MOD(l, 1, 4, 4); /* ITE */
398
399 perf_mark_start();
400
401 rfbi_write_reg(RFBI_CONTROL, l);
402}
403
404static void framedone_callback(void *data, u32 mask)
405{
406 void (*callback)(void *data);
407
408 DSSDBG("FRAMEDONE\n");
409
410 perf_show("DISPC");
411
412 REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
413
414 rfbi_enable_clocks(0);
415
416 callback = rfbi.framedone_callback;
417 rfbi.framedone_callback = NULL;
418
419 /*callback(rfbi.framedone_callback_data);*/
420
421 atomic_set(&rfbi.cmd_pending, 0);
422
423 process_cmd_fifo();
424}
425
426#if 1 /* VERBOSE */
427static void rfbi_print_timings(void)
428{
429 u32 l;
430 u32 time;
431
432 l = rfbi_read_reg(RFBI_CONFIG(0));
433 time = 1000000000 / rfbi.l4_khz;
434 if (l & (1 << 4))
435 time *= 2;
436
437 DSSDBG("Tick time %u ps\n", time);
438 l = rfbi_read_reg(RFBI_ONOFF_TIME(0));
439 DSSDBG("CSONTIME %d, CSOFFTIME %d, WEONTIME %d, WEOFFTIME %d, "
440 "REONTIME %d, REOFFTIME %d\n",
441 l & 0x0f, (l >> 4) & 0x3f, (l >> 10) & 0x0f, (l >> 14) & 0x3f,
442 (l >> 20) & 0x0f, (l >> 24) & 0x3f);
443
444 l = rfbi_read_reg(RFBI_CYCLE_TIME(0));
445 DSSDBG("WECYCLETIME %d, RECYCLETIME %d, CSPULSEWIDTH %d, "
446 "ACCESSTIME %d\n",
447 (l & 0x3f), (l >> 6) & 0x3f, (l >> 12) & 0x3f,
448 (l >> 22) & 0x3f);
449}
450#else
451static void rfbi_print_timings(void) {}
452#endif
453
454
455
456
457static u32 extif_clk_period;
458
459static inline unsigned long round_to_extif_ticks(unsigned long ps, int div)
460{
461 int bus_tick = extif_clk_period * div;
462 return (ps + bus_tick - 1) / bus_tick * bus_tick;
463}
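/*
 * Illustration (assuming a 166 MHz L4 bus, i.e. extif_clk_period ~= 6024
 * ps): round_to_extif_ticks(10000, 1) rounds 10 ns up to the next tick
 * boundary, (10000 + 6023) / 6024 * 6024 = 12048 ps.
 */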
464
465static int calc_reg_timing(struct rfbi_timings *t, int div)
466{
467 t->clk_div = div;
468
469 t->cs_on_time = round_to_extif_ticks(t->cs_on_time, div);
470
471 t->we_on_time = round_to_extif_ticks(t->we_on_time, div);
472 t->we_off_time = round_to_extif_ticks(t->we_off_time, div);
473 t->we_cycle_time = round_to_extif_ticks(t->we_cycle_time, div);
474
475 t->re_on_time = round_to_extif_ticks(t->re_on_time, div);
476 t->re_off_time = round_to_extif_ticks(t->re_off_time, div);
477 t->re_cycle_time = round_to_extif_ticks(t->re_cycle_time, div);
478
479 t->access_time = round_to_extif_ticks(t->access_time, div);
480 t->cs_off_time = round_to_extif_ticks(t->cs_off_time, div);
481 t->cs_pulse_width = round_to_extif_ticks(t->cs_pulse_width, div);
482
483 DSSDBG("[reg]cson %d csoff %d reon %d reoff %d\n",
484 t->cs_on_time, t->cs_off_time, t->re_on_time, t->re_off_time);
485 DSSDBG("[reg]weon %d weoff %d recyc %d wecyc %d\n",
486 t->we_on_time, t->we_off_time, t->re_cycle_time,
487 t->we_cycle_time);
488 DSSDBG("[reg]rdaccess %d cspulse %d\n",
489 t->access_time, t->cs_pulse_width);
490
491 return rfbi_convert_timings(t);
492}
493
494static int calc_extif_timings(struct rfbi_timings *t)
495{
496 u32 max_clk_div;
497 int div;
498
499 rfbi_get_clk_info(&extif_clk_period, &max_clk_div);
500 for (div = 1; div <= max_clk_div; div++) {
501 if (calc_reg_timing(t, div) == 0)
502 break;
503 }
504
505 if (div <= max_clk_div)
506 return 0;
507
508 DSSERR("can't setup timings\n");
509 return -1;
510}
511
512
513void rfbi_set_timings(int rfbi_module, struct rfbi_timings *t)
514{
515 int r;
516
517 if (!t->converted) {
518 r = calc_extif_timings(t);
519 if (r < 0)
520 DSSERR("Failed to calc timings\n");
521 }
522
523 BUG_ON(!t->converted);
524
525 rfbi_enable_clocks(1);
526 rfbi_write_reg(RFBI_ONOFF_TIME(rfbi_module), t->tim[0]);
527 rfbi_write_reg(RFBI_CYCLE_TIME(rfbi_module), t->tim[1]);
528
529 /* TIMEGRANULARITY */
530 REG_FLD_MOD(RFBI_CONFIG(rfbi_module),
531 (t->tim[2] ? 1 : 0), 4, 4);
532
533 rfbi_print_timings();
534 rfbi_enable_clocks(0);
535}
536
537static int ps_to_rfbi_ticks(int time, int div)
538{
539 unsigned long tick_ps;
540 int ret;
541
542 /* Calculate in picosecs to yield more exact results */
543 tick_ps = 1000000000 / (rfbi.l4_khz) * div;
544
545 ret = (time + tick_ps - 1) / tick_ps;
546
547 return ret;
548}
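/*
 * Illustration (assuming l4_khz = 166000): one tick is
 * 1000000000 / 166000 = 6024 ps, so ps_to_rfbi_ticks(15000, 1) yields
 * (15000 + 6023) / 6024 = 3 ticks.
 */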
549
550#ifdef OMAP_RFBI_RATE_LIMIT
551unsigned long rfbi_get_max_tx_rate(void)
552{
553 unsigned long l4_rate, dss1_rate;
554 int min_l4_ticks = 0;
555 int i;
556
557	/* According to TI this can't be calculated, so make the
558	 * adjustments for a couple of known frequencies and warn for
559	 * the others.
560	 */
561	static const struct {
562		unsigned long l4_clk;		/* MHz */
563		unsigned long dss1_clk;		/* MHz */
564 unsigned long min_l4_ticks;
565 } ftab[] = {
566 { 55, 132, 7, }, /* 7.86 MPix/s */
567 { 110, 110, 12, }, /* 9.16 MPix/s */
568 { 110, 132, 10, }, /* 11 Mpix/s */
569 { 120, 120, 10, }, /* 12 Mpix/s */
570 { 133, 133, 10, }, /* 13.3 Mpix/s */
571 };
572
573 l4_rate = rfbi.l4_khz / 1000;
574 dss1_rate = dss_clk_get_rate(DSS_CLK_FCK1) / 1000000;
575
576 for (i = 0; i < ARRAY_SIZE(ftab); i++) {
577 /* Use a window instead of an exact match, to account
578 * for different DPLL multiplier / divider pairs.
579 */
580 if (abs(ftab[i].l4_clk - l4_rate) < 3 &&
581 abs(ftab[i].dss1_clk - dss1_rate) < 3) {
582 min_l4_ticks = ftab[i].min_l4_ticks;
583 break;
584 }
585 }
586 if (i == ARRAY_SIZE(ftab)) {
587		/* We can't be sure, so return the maximum rate without
588		 * limiting. This should only be a problem for tearing
589		 * synchronisation.
590		 */
591 DSSERR("can't determine maximum RFBI transfer rate\n");
592 return rfbi.l4_khz * 1000;
593 }
594 return rfbi.l4_khz * 1000 / min_l4_ticks;
595}
596#else
597unsigned long rfbi_get_max_tx_rate(void)
598{
599 return rfbi.l4_khz * 1000;
600}
601#endif
602
603static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div)
604{
605 *clk_period = 1000000000 / rfbi.l4_khz;
606 *max_clk_div = 2;
607}
608
609static int rfbi_convert_timings(struct rfbi_timings *t)
610{
611 u32 l;
612 int reon, reoff, weon, weoff, cson, csoff, cs_pulse;
613 int actim, recyc, wecyc;
614 int div = t->clk_div;
615
616 if (div <= 0 || div > 2)
617 return -1;
618
619 /* Make sure that after conversion it still holds that:
620 * weoff > weon, reoff > reon, recyc >= reoff, wecyc >= weoff,
621 * csoff > cson, csoff >= max(weoff, reoff), actim > reon
622 */
623 weon = ps_to_rfbi_ticks(t->we_on_time, div);
624 weoff = ps_to_rfbi_ticks(t->we_off_time, div);
625 if (weoff <= weon)
626 weoff = weon + 1;
627 if (weon > 0x0f)
628 return -1;
629 if (weoff > 0x3f)
630 return -1;
631
632 reon = ps_to_rfbi_ticks(t->re_on_time, div);
633 reoff = ps_to_rfbi_ticks(t->re_off_time, div);
634 if (reoff <= reon)
635 reoff = reon + 1;
636 if (reon > 0x0f)
637 return -1;
638 if (reoff > 0x3f)
639 return -1;
640
641 cson = ps_to_rfbi_ticks(t->cs_on_time, div);
642 csoff = ps_to_rfbi_ticks(t->cs_off_time, div);
643 if (csoff <= cson)
644 csoff = cson + 1;
645 if (csoff < max(weoff, reoff))
646 csoff = max(weoff, reoff);
647 if (cson > 0x0f)
648 return -1;
649 if (csoff > 0x3f)
650 return -1;
651
652 l = cson;
653 l |= csoff << 4;
654 l |= weon << 10;
655 l |= weoff << 14;
656 l |= reon << 20;
657 l |= reoff << 24;
658
659 t->tim[0] = l;
660
661 actim = ps_to_rfbi_ticks(t->access_time, div);
662 if (actim <= reon)
663 actim = reon + 1;
664 if (actim > 0x3f)
665 return -1;
666
667 wecyc = ps_to_rfbi_ticks(t->we_cycle_time, div);
668 if (wecyc < weoff)
669 wecyc = weoff;
670 if (wecyc > 0x3f)
671 return -1;
672
673 recyc = ps_to_rfbi_ticks(t->re_cycle_time, div);
674 if (recyc < reoff)
675 recyc = reoff;
676 if (recyc > 0x3f)
677 return -1;
678
679 cs_pulse = ps_to_rfbi_ticks(t->cs_pulse_width, div);
680 if (cs_pulse > 0x3f)
681 return -1;
682
683 l = wecyc;
684 l |= recyc << 6;
685 l |= cs_pulse << 12;
686 l |= actim << 22;
687
688 t->tim[1] = l;
689
690 t->tim[2] = div - 1;
691
692 t->converted = 1;
693
694 return 0;
695}
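/*
 * The converted values are cached in t->tim[]: tim[0] holds the
 * RFBI_ONOFF_TIME layout (CS/WE/RE on and off times), tim[1] the
 * RFBI_CYCLE_TIME layout (cycle times, CS pulse width, access time) and
 * tim[2] the TIMEGRANULARITY flag, exactly as rfbi_set_timings()
 * programs them.
 */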
696
697/* xxx FIX module selection missing */
698int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
699 unsigned hs_pulse_time, unsigned vs_pulse_time,
700 int hs_pol_inv, int vs_pol_inv, int extif_div)
701{
702 int hs, vs;
703 int min;
704 u32 l;
705
706 hs = ps_to_rfbi_ticks(hs_pulse_time, 1);
707 vs = ps_to_rfbi_ticks(vs_pulse_time, 1);
708 if (hs < 2)
709 return -EDOM;
710 if (mode == OMAP_DSS_RFBI_TE_MODE_2)
711 min = 2;
712 else /* OMAP_DSS_RFBI_TE_MODE_1 */
713 min = 4;
714 if (vs < min)
715 return -EDOM;
716 if (vs == hs)
717 return -EINVAL;
718 rfbi.te_mode = mode;
719 DSSDBG("setup_te: mode %d hs %d vs %d hs_inv %d vs_inv %d\n",
720 mode, hs, vs, hs_pol_inv, vs_pol_inv);
721
722 rfbi_enable_clocks(1);
723 rfbi_write_reg(RFBI_HSYNC_WIDTH, hs);
724 rfbi_write_reg(RFBI_VSYNC_WIDTH, vs);
725
726 l = rfbi_read_reg(RFBI_CONFIG(0));
727 if (hs_pol_inv)
728 l &= ~(1 << 21);
729 else
730 l |= 1 << 21;
731 if (vs_pol_inv)
732 l &= ~(1 << 20);
733 else
734		l |= 1 << 20;
	rfbi_write_reg(RFBI_CONFIG(0), l);	/* write the polarity bits back */
735	rfbi_enable_clocks(0);
736
737 return 0;
738}
739EXPORT_SYMBOL(omap_rfbi_setup_te);
740
741/* xxx FIX module selection missing */
742int omap_rfbi_enable_te(bool enable, unsigned line)
743{
744 u32 l;
745
746 DSSDBG("te %d line %d mode %d\n", enable, line, rfbi.te_mode);
747 if (line > (1 << 11) - 1)
748 return -EINVAL;
749
750 rfbi_enable_clocks(1);
751 l = rfbi_read_reg(RFBI_CONFIG(0));
752 l &= ~(0x3 << 2);
753 if (enable) {
754 rfbi.te_enabled = 1;
755 l |= rfbi.te_mode << 2;
756 } else
757 rfbi.te_enabled = 0;
758 rfbi_write_reg(RFBI_CONFIG(0), l);
759 rfbi_write_reg(RFBI_LINE_NUMBER, line);
760 rfbi_enable_clocks(0);
761
762 return 0;
763}
764EXPORT_SYMBOL(omap_rfbi_enable_te);
765
766#if 0
767static void rfbi_enable_config(int enable1, int enable2)
768{
769 u32 l;
770 int cs = 0;
771
772 if (enable1)
773 cs |= 1<<0;
774 if (enable2)
775 cs |= 1<<1;
776
777 rfbi_enable_clocks(1);
778
779 l = rfbi_read_reg(RFBI_CONTROL);
780
781 l = FLD_MOD(l, cs, 3, 2);
782 l = FLD_MOD(l, 0, 1, 1);
783
784 rfbi_write_reg(RFBI_CONTROL, l);
785
786
787 l = rfbi_read_reg(RFBI_CONFIG(0));
788 l = FLD_MOD(l, 0, 3, 2); /* TRIGGERMODE: ITE */
789 /*l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
790 /*l |= FLD_VAL(0, 8, 7); */ /* L4FORMAT, 1pix/L4 */
791
792 l = FLD_MOD(l, 0, 16, 16); /* A0POLARITY */
793 l = FLD_MOD(l, 1, 20, 20); /* TE_VSYNC_POLARITY */
794 l = FLD_MOD(l, 1, 21, 21); /* HSYNCPOLARITY */
795
796 l = FLD_MOD(l, OMAP_DSS_RFBI_PARALLELMODE_8, 1, 0);
797 rfbi_write_reg(RFBI_CONFIG(0), l);
798
799 rfbi_enable_clocks(0);
800}
801#endif
802
803int rfbi_configure(int rfbi_module, int bpp, int lines)
804{
805 u32 l;
806 int cycle1 = 0, cycle2 = 0, cycle3 = 0;
807 enum omap_rfbi_cycleformat cycleformat;
808 enum omap_rfbi_datatype datatype;
809 enum omap_rfbi_parallelmode parallelmode;
810
811 switch (bpp) {
812 case 12:
813 datatype = OMAP_DSS_RFBI_DATATYPE_12;
814 break;
815 case 16:
816 datatype = OMAP_DSS_RFBI_DATATYPE_16;
817 break;
818 case 18:
819 datatype = OMAP_DSS_RFBI_DATATYPE_18;
820 break;
821 case 24:
822 datatype = OMAP_DSS_RFBI_DATATYPE_24;
823 break;
824 default:
825 BUG();
826 return 1;
827 }
828 rfbi.datatype = datatype;
829
830 switch (lines) {
831 case 8:
832 parallelmode = OMAP_DSS_RFBI_PARALLELMODE_8;
833 break;
834 case 9:
835 parallelmode = OMAP_DSS_RFBI_PARALLELMODE_9;
836 break;
837 case 12:
838 parallelmode = OMAP_DSS_RFBI_PARALLELMODE_12;
839 break;
840 case 16:
841 parallelmode = OMAP_DSS_RFBI_PARALLELMODE_16;
842 break;
843 default:
844 BUG();
845 return 1;
846 }
847 rfbi.parallelmode = parallelmode;
848
849 if ((bpp % lines) == 0) {
850 switch (bpp / lines) {
851 case 1:
852 cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_1_1;
853 break;
854 case 2:
855 cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_2_1;
856 break;
857 case 3:
858 cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_1;
859 break;
860 default:
861 BUG();
862 return 1;
863 }
864 } else if ((2 * bpp % lines) == 0) {
865 if ((2 * bpp / lines) == 3)
866 cycleformat = OMAP_DSS_RFBI_CYCLEFORMAT_3_2;
867 else {
868 BUG();
869 return 1;
870 }
871 } else {
872 BUG();
873 return 1;
874 }
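	/* For example, bpp = 12 on an 8-bit bus gives 2 * 12 / 8 = 3, i.e.
	 * three bus cycles carry two pixels (CYCLEFORMAT_3_2). */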
875
876 switch (cycleformat) {
877 case OMAP_DSS_RFBI_CYCLEFORMAT_1_1:
878 cycle1 = lines;
879 break;
880
881 case OMAP_DSS_RFBI_CYCLEFORMAT_2_1:
882 cycle1 = lines;
883 cycle2 = lines;
884 break;
885
886 case OMAP_DSS_RFBI_CYCLEFORMAT_3_1:
887 cycle1 = lines;
888 cycle2 = lines;
889 cycle3 = lines;
890 break;
891
892 case OMAP_DSS_RFBI_CYCLEFORMAT_3_2:
893 cycle1 = lines;
894 cycle2 = (lines / 2) | ((lines / 2) << 16);
895 cycle3 = (lines << 16);
896 break;
897 }
898
899 rfbi_enable_clocks(1);
900
901 REG_FLD_MOD(RFBI_CONTROL, 0, 3, 2); /* clear CS */
902
903 l = 0;
904 l |= FLD_VAL(parallelmode, 1, 0);
905 l |= FLD_VAL(0, 3, 2); /* TRIGGERMODE: ITE */
906 l |= FLD_VAL(0, 4, 4); /* TIMEGRANULARITY */
907 l |= FLD_VAL(datatype, 6, 5);
908 /* l |= FLD_VAL(2, 8, 7); */ /* L4FORMAT, 2pix/L4 */
909 l |= FLD_VAL(0, 8, 7); /* L4FORMAT, 1pix/L4 */
910 l |= FLD_VAL(cycleformat, 10, 9);
911 l |= FLD_VAL(0, 12, 11); /* UNUSEDBITS */
912 l |= FLD_VAL(0, 16, 16); /* A0POLARITY */
913 l |= FLD_VAL(0, 17, 17); /* REPOLARITY */
914 l |= FLD_VAL(0, 18, 18); /* WEPOLARITY */
915 l |= FLD_VAL(0, 19, 19); /* CSPOLARITY */
916 l |= FLD_VAL(1, 20, 20); /* TE_VSYNC_POLARITY */
917 l |= FLD_VAL(1, 21, 21); /* HSYNCPOLARITY */
918 rfbi_write_reg(RFBI_CONFIG(rfbi_module), l);
919
920 rfbi_write_reg(RFBI_DATA_CYCLE1(rfbi_module), cycle1);
921 rfbi_write_reg(RFBI_DATA_CYCLE2(rfbi_module), cycle2);
922 rfbi_write_reg(RFBI_DATA_CYCLE3(rfbi_module), cycle3);
923
924
925 l = rfbi_read_reg(RFBI_CONTROL);
926 l = FLD_MOD(l, rfbi_module+1, 3, 2); /* Select CSx */
927 l = FLD_MOD(l, 0, 1, 1); /* clear bypass */
928 rfbi_write_reg(RFBI_CONTROL, l);
929
930
931 DSSDBG("RFBI config: bpp %d, lines %d, cycles: 0x%x 0x%x 0x%x\n",
932 bpp, lines, cycle1, cycle2, cycle3);
933
934 rfbi_enable_clocks(0);
935
936 return 0;
937}
938EXPORT_SYMBOL(rfbi_configure);
939
940static int rfbi_find_display(struct omap_dss_device *dssdev)
941{
942 if (dssdev == rfbi.dssdev[0])
943 return 0;
944
945 if (dssdev == rfbi.dssdev[1])
946 return 1;
947
948 BUG();
949 return -1;
950}
951
952
953static void signal_fifo_waiters(void)
954{
955 if (atomic_read(&rfbi.cmd_fifo_full) > 0) {
956 /* DSSDBG("SIGNALING: Fifo not full for waiter!\n"); */
957 complete(&rfbi.cmd_done);
958 atomic_dec(&rfbi.cmd_fifo_full);
959 }
960}
961
962/* returns 1 for async op, and 0 for sync op */
963static int do_update(struct omap_dss_device *dssdev, struct update_region *upd)
964{
965 u16 x = upd->x;
966 u16 y = upd->y;
967 u16 w = upd->w;
968 u16 h = upd->h;
969
970 perf_mark_setup();
971
972 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
973 /*dssdev->driver->enable_te(dssdev, 1); */
974 dss_setup_partial_planes(dssdev, &x, &y, &w, &h);
975 }
976
977#ifdef MEASURE_PERF
978 rfbi.perf_bytes = w * h * 2; /* XXX always 16bit */
979#endif
980
981 dssdev->driver->setup_update(dssdev, x, y, w, h);
982
983 if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
984 rfbi_transfer_area(w, h, NULL, NULL);
985 return 1;
986 } else {
987 struct omap_overlay *ovl;
988 void __iomem *addr;
989 int scr_width;
990
991 ovl = dssdev->manager->overlays[0];
992 scr_width = ovl->info.screen_width;
993 addr = ovl->info.vaddr;
994
995 omap_rfbi_write_pixels(addr, scr_width, x, y, w, h);
996
997 perf_show("L4");
998
999 return 0;
1000 }
1001}
1002
1003static void process_cmd_fifo(void)
1004{
1005 int len;
1006 struct update_param p;
1007 struct omap_dss_device *dssdev;
1008 unsigned long flags;
1009
1010 if (atomic_inc_return(&rfbi.cmd_pending) != 1)
1011 return;
1012
1013 while (true) {
1014 spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
1015
1016 len = __kfifo_get(rfbi.cmd_fifo, (unsigned char *)&p,
1017 sizeof(struct update_param));
1018 if (len == 0) {
1019 DSSDBG("nothing more in fifo\n");
1020 atomic_set(&rfbi.cmd_pending, 0);
1021 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
1022 break;
1023 }
1024
1025 /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
1026
1027 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
1028
1029 BUG_ON(len != sizeof(struct update_param));
1030 BUG_ON(p.rfbi_module > 1);
1031
1032 dssdev = rfbi.dssdev[p.rfbi_module];
1033
1034 if (p.cmd == RFBI_CMD_UPDATE) {
1035 if (do_update(dssdev, &p.par.r))
1036 break; /* async op */
1037 } else if (p.cmd == RFBI_CMD_SYNC) {
1038 DSSDBG("Signaling SYNC done!\n");
1039 complete(p.par.sync);
1040 } else
1041 BUG();
1042 }
1043
1044 signal_fifo_waiters();
1045}
1046
1047static void rfbi_push_cmd(struct update_param *p)
1048{
1049 int ret;
1050
1051 while (1) {
1052 unsigned long flags;
1053 int available;
1054
1055 spin_lock_irqsave(rfbi.cmd_fifo->lock, flags);
1056 available = RFBI_CMD_FIFO_LEN_BYTES -
1057 __kfifo_len(rfbi.cmd_fifo);
1058
1059/* DSSDBG("%d bytes left in fifo\n", available); */
1060 if (available < sizeof(struct update_param)) {
1061 DSSDBG("Going to wait because FIFO FULL..\n");
1062 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
1063 atomic_inc(&rfbi.cmd_fifo_full);
1064 wait_for_completion(&rfbi.cmd_done);
1065 /*DSSDBG("Woke up because fifo not full anymore\n");*/
1066 continue;
1067 }
1068
1069 ret = __kfifo_put(rfbi.cmd_fifo, (unsigned char *)p,
1070 sizeof(struct update_param));
1071/* DSSDBG("pushed %d bytes\n", ret);*/
1072
1073 spin_unlock_irqrestore(rfbi.cmd_fifo->lock, flags);
1074
1075 BUG_ON(ret != sizeof(struct update_param));
1076
1077 break;
1078 }
1079}
1080
1081static void rfbi_push_update(int rfbi_module, int x, int y, int w, int h)
1082{
1083 struct update_param p;
1084
1085 p.rfbi_module = rfbi_module;
1086 p.cmd = RFBI_CMD_UPDATE;
1087
1088 p.par.r.x = x;
1089 p.par.r.y = y;
1090 p.par.r.w = w;
1091 p.par.r.h = h;
1092
1093 DSSDBG("RFBI pushed %d,%d %dx%d\n", x, y, w, h);
1094
1095 rfbi_push_cmd(&p);
1096
1097 process_cmd_fifo();
1098}
1099
1100static void rfbi_push_sync(int rfbi_module, struct completion *sync_comp)
1101{
1102 struct update_param p;
1103
1104 p.rfbi_module = rfbi_module;
1105 p.cmd = RFBI_CMD_SYNC;
1106 p.par.sync = sync_comp;
1107
1108 rfbi_push_cmd(&p);
1109
1110 DSSDBG("RFBI sync pushed to cmd fifo\n");
1111
1112 process_cmd_fifo();
1113}
1114
1115void rfbi_dump_regs(struct seq_file *s)
1116{
1117#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
1118
1119 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
1120
1121 DUMPREG(RFBI_REVISION);
1122 DUMPREG(RFBI_SYSCONFIG);
1123 DUMPREG(RFBI_SYSSTATUS);
1124 DUMPREG(RFBI_CONTROL);
1125 DUMPREG(RFBI_PIXEL_CNT);
1126 DUMPREG(RFBI_LINE_NUMBER);
1127 DUMPREG(RFBI_CMD);
1128 DUMPREG(RFBI_PARAM);
1129 DUMPREG(RFBI_DATA);
1130 DUMPREG(RFBI_READ);
1131 DUMPREG(RFBI_STATUS);
1132
1133 DUMPREG(RFBI_CONFIG(0));
1134 DUMPREG(RFBI_ONOFF_TIME(0));
1135 DUMPREG(RFBI_CYCLE_TIME(0));
1136 DUMPREG(RFBI_DATA_CYCLE1(0));
1137 DUMPREG(RFBI_DATA_CYCLE2(0));
1138 DUMPREG(RFBI_DATA_CYCLE3(0));
1139
1140 DUMPREG(RFBI_CONFIG(1));
1141 DUMPREG(RFBI_ONOFF_TIME(1));
1142 DUMPREG(RFBI_CYCLE_TIME(1));
1143 DUMPREG(RFBI_DATA_CYCLE1(1));
1144 DUMPREG(RFBI_DATA_CYCLE2(1));
1145 DUMPREG(RFBI_DATA_CYCLE3(1));
1146
1147 DUMPREG(RFBI_VSYNC_WIDTH);
1148 DUMPREG(RFBI_HSYNC_WIDTH);
1149
1150 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
1151#undef DUMPREG
1152}
1153
1154int rfbi_init(void)
1155{
1156 u32 rev;
1157 u32 l;
1158
1159 spin_lock_init(&rfbi.cmd_lock);
1160 rfbi.cmd_fifo = kfifo_alloc(RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL,
1161 &rfbi.cmd_lock);
1162 if (IS_ERR(rfbi.cmd_fifo))
1163 return -ENOMEM;
1164
1165 init_completion(&rfbi.cmd_done);
1166 atomic_set(&rfbi.cmd_fifo_full, 0);
1167 atomic_set(&rfbi.cmd_pending, 0);
1168
1169 rfbi.base = ioremap(RFBI_BASE, SZ_256);
1170	if (!rfbi.base) {
1171		DSSERR("can't ioremap RFBI\n");
		kfifo_free(rfbi.cmd_fifo);	/* don't leak the cmd fifo */
1172		return -ENOMEM;
1173	}
1174
1175 rfbi_enable_clocks(1);
1176
1177 msleep(10);
1178
1179 rfbi.l4_khz = dss_clk_get_rate(DSS_CLK_ICK) / 1000;
1180
1181 /* Enable autoidle and smart-idle */
1182 l = rfbi_read_reg(RFBI_SYSCONFIG);
1183 l |= (1 << 0) | (2 << 3);
1184 rfbi_write_reg(RFBI_SYSCONFIG, l);
1185
1186 rev = rfbi_read_reg(RFBI_REVISION);
1187 printk(KERN_INFO "OMAP RFBI rev %d.%d\n",
1188 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
1189
1190 rfbi_enable_clocks(0);
1191
1192 return 0;
1193}
1194
1195void rfbi_exit(void)
1196{
1197 DSSDBG("rfbi_exit\n");
1198
1199 kfifo_free(rfbi.cmd_fifo);
1200
1201 iounmap(rfbi.base);
1202}
1203
1204/* struct omap_display support */
1205static int rfbi_display_update(struct omap_dss_device *dssdev,
1206 u16 x, u16 y, u16 w, u16 h)
1207{
1208 int rfbi_module;
1209
1210 if (w == 0 || h == 0)
1211 return 0;
1212
1213 rfbi_module = rfbi_find_display(dssdev);
1214
1215 rfbi_push_update(rfbi_module, x, y, w, h);
1216
1217 return 0;
1218}
1219
1220static int rfbi_display_sync(struct omap_dss_device *dssdev)
1221{
1222 struct completion sync_comp;
1223 int rfbi_module;
1224
1225 rfbi_module = rfbi_find_display(dssdev);
1226
1227 init_completion(&sync_comp);
1228 rfbi_push_sync(rfbi_module, &sync_comp);
1229 DSSDBG("Waiting for SYNC to happen...\n");
1230 wait_for_completion(&sync_comp);
1231 DSSDBG("Released from SYNC\n");
1232 return 0;
1233}
1234
1235static int rfbi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
1236{
1237 dssdev->driver->enable_te(dssdev, enable);
1238 return 0;
1239}
1240
1241static int rfbi_display_enable(struct omap_dss_device *dssdev)
1242{
1243 int r;
1244
1245 r = omap_dss_start_device(dssdev);
1246 if (r) {
1247 DSSERR("failed to start device\n");
1248 goto err0;
1249 }
1250
1251 r = omap_dispc_register_isr(framedone_callback, NULL,
1252 DISPC_IRQ_FRAMEDONE);
1253 if (r) {
1254 DSSERR("can't get FRAMEDONE irq\n");
1255 goto err1;
1256 }
1257
1258 dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
1259
1260 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_RFBI);
1261
1262 dispc_set_tft_data_lines(dssdev->ctrl.pixel_size);
1263
1264 rfbi_configure(dssdev->phy.rfbi.channel,
1265 dssdev->ctrl.pixel_size,
1266 dssdev->phy.rfbi.data_lines);
1267
1268 rfbi_set_timings(dssdev->phy.rfbi.channel,
1269 &dssdev->ctrl.rfbi_timings);
1270
1271
1272 if (dssdev->driver->enable) {
1273 r = dssdev->driver->enable(dssdev);
1274 if (r)
1275 goto err2;
1276 }
1277
1278 return 0;
1279err2:
1280 omap_dispc_unregister_isr(framedone_callback, NULL,
1281 DISPC_IRQ_FRAMEDONE);
1282err1:
1283 omap_dss_stop_device(dssdev);
1284err0:
1285 return r;
1286}
1287
1288static void rfbi_display_disable(struct omap_dss_device *dssdev)
1289{
1290 dssdev->driver->disable(dssdev);
1291 omap_dispc_unregister_isr(framedone_callback, NULL,
1292 DISPC_IRQ_FRAMEDONE);
1293 omap_dss_stop_device(dssdev);
1294}
1295
1296int rfbi_init_display(struct omap_dss_device *dssdev)
1297{
1298 dssdev->enable = rfbi_display_enable;
1299 dssdev->disable = rfbi_display_disable;
1300 dssdev->update = rfbi_display_update;
1301 dssdev->sync = rfbi_display_sync;
1302 dssdev->enable_te = rfbi_display_enable_te;
1303
1304 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
1305
1306 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
1307
1308 return 0;
1309}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
new file mode 100644
index 000000000000..c24f307d3da1
--- /dev/null
+++ b/drivers/video/omap2/dss/sdi.c
@@ -0,0 +1,277 @@
1/*
2 * linux/drivers/video/omap2/dss/sdi.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#define DSS_SUBSYS_NAME "SDI"
21
22#include <linux/kernel.h>
23#include <linux/clk.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26
27#include <plat/display.h>
28#include "dss.h"
29
30static struct {
31 bool skip_init;
32 bool update_enabled;
33} sdi;
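/*
 * skip_init is set when the bootloader has already configured the DSS
 * for SDI; the first display enable then reuses that setup and clears
 * the flag.
 */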
34
35static void sdi_basic_init(void)
36{
37 dispc_set_parallel_interface_mode(OMAP_DSS_PARALLELMODE_BYPASS);
38
39 dispc_set_lcd_display_type(OMAP_DSS_LCD_DISPLAY_TFT);
40 dispc_set_tft_data_lines(24);
41 dispc_lcd_enable_signal_polarity(1);
42}
43
44static int sdi_display_enable(struct omap_dss_device *dssdev)
45{
46 struct omap_video_timings *t = &dssdev->panel.timings;
47 struct dss_clock_info dss_cinfo;
48 struct dispc_clock_info dispc_cinfo;
49 u16 lck_div, pck_div;
50 unsigned long fck;
51 unsigned long pck;
52 int r;
53
54 r = omap_dss_start_device(dssdev);
55 if (r) {
56 DSSERR("failed to start device\n");
57 goto err0;
58 }
59
60 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
61 DSSERR("dssdev already enabled\n");
62 r = -EINVAL;
63 goto err1;
64 }
65
66 /* In case of skip_init sdi_init has already enabled the clocks */
67 if (!sdi.skip_init)
68 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
69
70 sdi_basic_init();
71
72 /* 15.5.9.1.2 */
73 dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF;
74
75 dispc_set_pol_freq(dssdev->panel.config, dssdev->panel.acbi,
76 dssdev->panel.acb);
77
78 if (!sdi.skip_init) {
79 r = dss_calc_clock_div(1, t->pixel_clock * 1000,
80 &dss_cinfo, &dispc_cinfo);
81 } else {
82		r = dss_get_clock_div(&dss_cinfo);
83		r = r ? r : dispc_get_clock_div(&dispc_cinfo); /* keep the first error */
84 }
85
86 if (r)
87 goto err2;
88
89 fck = dss_cinfo.fck;
90 lck_div = dispc_cinfo.lck_div;
91 pck_div = dispc_cinfo.pck_div;
92
93 pck = fck / lck_div / pck_div / 1000;
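	/* fck is in Hz, so after the two divisors and the final /1000 the
	 * pixel clock is in kHz, the same unit as t->pixel_clock. */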
94
95 if (pck != t->pixel_clock) {
96 DSSWARN("Could not find exact pixel clock. Requested %d kHz, "
97 "got %lu kHz\n",
98 t->pixel_clock, pck);
99
100 t->pixel_clock = pck;
101 }
102
103
104 dispc_set_lcd_timings(t);
105
106 r = dss_set_clock_div(&dss_cinfo);
107 if (r)
108 goto err2;
109
110 r = dispc_set_clock_div(&dispc_cinfo);
111 if (r)
112 goto err2;
113
114 if (!sdi.skip_init) {
115 dss_sdi_init(dssdev->phy.sdi.datapairs);
116 r = dss_sdi_enable();
117 if (r)
118 goto err1;
119 mdelay(2);
120 }
121
122 dispc_enable_lcd_out(1);
123
124 if (dssdev->driver->enable) {
125 r = dssdev->driver->enable(dssdev);
126 if (r)
127 goto err3;
128 }
129
130 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
131
132 sdi.skip_init = 0;
133
134 return 0;
135err3:
136 dispc_enable_lcd_out(0);
137err2:
138 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
139err1:
140 omap_dss_stop_device(dssdev);
141err0:
142 return r;
143}
144
145static int sdi_display_resume(struct omap_dss_device *dssdev);
146
147static void sdi_display_disable(struct omap_dss_device *dssdev)
148{
149 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
150 return;
151
152 if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
153 if (sdi_display_resume(dssdev))
154 return;
155
156 if (dssdev->driver->disable)
157 dssdev->driver->disable(dssdev);
158
159 dispc_enable_lcd_out(0);
160
161 dss_sdi_disable();
162
163 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
164
165 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
166
167 omap_dss_stop_device(dssdev);
168}
169
170static int sdi_display_suspend(struct omap_dss_device *dssdev)
171{
172 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
173 return -EINVAL;
174
175 if (dssdev->driver->suspend)
176 dssdev->driver->suspend(dssdev);
177
178 dispc_enable_lcd_out(0);
179
180 dss_sdi_disable();
181
182 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
183
184 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
185
186 return 0;
187}
188
189static int sdi_display_resume(struct omap_dss_device *dssdev)
190{
191 int r;
192
193 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
194 return -EINVAL;
195
196 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
197
198 r = dss_sdi_enable();
199 if (r)
200 goto err;
201 mdelay(2);
202
203 dispc_enable_lcd_out(1);
204
205 if (dssdev->driver->resume)
206 dssdev->driver->resume(dssdev);
207
208 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
209
210 return 0;
211err:
212 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
213 return r;
214}
215
216static int sdi_display_set_update_mode(struct omap_dss_device *dssdev,
217 enum omap_dss_update_mode mode)
218{
219 if (mode == OMAP_DSS_UPDATE_MANUAL)
220 return -EINVAL;
221
222 if (mode == OMAP_DSS_UPDATE_DISABLED) {
223 dispc_enable_lcd_out(0);
224 sdi.update_enabled = 0;
225 } else {
226 dispc_enable_lcd_out(1);
227 sdi.update_enabled = 1;
228 }
229
230 return 0;
231}
232
233static enum omap_dss_update_mode sdi_display_get_update_mode(
234 struct omap_dss_device *dssdev)
235{
236 return sdi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
237 OMAP_DSS_UPDATE_DISABLED;
238}
239
240static void sdi_get_timings(struct omap_dss_device *dssdev,
241 struct omap_video_timings *timings)
242{
243 *timings = dssdev->panel.timings;
244}
245
246int sdi_init_display(struct omap_dss_device *dssdev)
247{
248 DSSDBG("SDI init\n");
249
250 dssdev->enable = sdi_display_enable;
251 dssdev->disable = sdi_display_disable;
252 dssdev->suspend = sdi_display_suspend;
253 dssdev->resume = sdi_display_resume;
254 dssdev->set_update_mode = sdi_display_set_update_mode;
255 dssdev->get_update_mode = sdi_display_get_update_mode;
256 dssdev->get_timings = sdi_get_timings;
257
258 return 0;
259}
260
261int sdi_init(bool skip_init)
262{
263 /* we store this for first display enable, then clear it */
264 sdi.skip_init = skip_init;
265
266	/*
267	 * Enable the clocks here already; otherwise they would be toggled
268	 * off and on again before sdi_display_enable is called.
269	 */
270 if (skip_init)
271 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
272 return 0;
273}
274
275void sdi_exit(void)
276{
277}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
new file mode 100644
index 000000000000..749a5a0f5be4
--- /dev/null
+++ b/drivers/video/omap2/dss/venc.c
@@ -0,0 +1,797 @@
1/*
2 * linux/drivers/video/omap2/dss/venc.c
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * VENC settings from TI's DSS driver
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published by
11 * the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#define DSS_SUBSYS_NAME "VENC"
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/clk.h>
27#include <linux/err.h>
28#include <linux/io.h>
29#include <linux/mutex.h>
30#include <linux/completion.h>
31#include <linux/delay.h>
32#include <linux/string.h>
33#include <linux/seq_file.h>
34#include <linux/platform_device.h>
35#include <linux/regulator/consumer.h>
36
37#include <plat/display.h>
38#include <plat/cpu.h>
39
40#include "dss.h"
41
42#define VENC_BASE 0x48050C00
43
44/* Venc registers */
45#define VENC_REV_ID 0x00
46#define VENC_STATUS 0x04
47#define VENC_F_CONTROL 0x08
48#define VENC_VIDOUT_CTRL 0x10
49#define VENC_SYNC_CTRL 0x14
50#define VENC_LLEN 0x1C
51#define VENC_FLENS 0x20
52#define VENC_HFLTR_CTRL 0x24
53#define VENC_CC_CARR_WSS_CARR 0x28
54#define VENC_C_PHASE 0x2C
55#define VENC_GAIN_U 0x30
56#define VENC_GAIN_V 0x34
57#define VENC_GAIN_Y 0x38
58#define VENC_BLACK_LEVEL 0x3C
59#define VENC_BLANK_LEVEL 0x40
60#define VENC_X_COLOR 0x44
61#define VENC_M_CONTROL 0x48
62#define VENC_BSTAMP_WSS_DATA 0x4C
63#define VENC_S_CARR 0x50
64#define VENC_LINE21 0x54
65#define VENC_LN_SEL 0x58
66#define VENC_L21__WC_CTL 0x5C
67#define VENC_HTRIGGER_VTRIGGER 0x60
68#define VENC_SAVID__EAVID 0x64
69#define VENC_FLEN__FAL 0x68
70#define VENC_LAL__PHASE_RESET 0x6C
71#define VENC_HS_INT_START_STOP_X 0x70
72#define VENC_HS_EXT_START_STOP_X 0x74
73#define VENC_VS_INT_START_X 0x78
74#define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C
75#define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80
76#define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84
77#define VENC_VS_EXT_STOP_Y 0x88
78#define VENC_AVID_START_STOP_X 0x90
79#define VENC_AVID_START_STOP_Y 0x94
80#define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0
81#define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4
82#define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8
83#define VENC_TVDETGP_INT_START_STOP_X 0xB0
84#define VENC_TVDETGP_INT_START_STOP_Y 0xB4
85#define VENC_GEN_CTRL 0xB8
86#define VENC_OUTPUT_CONTROL 0xC4
87#define VENC_OUTPUT_TEST 0xC8
88#define VENC_DAC_B__DAC_C 0xC8
89
90struct venc_config {
91 u32 f_control;
92 u32 vidout_ctrl;
93 u32 sync_ctrl;
94 u32 llen;
95 u32 flens;
96 u32 hfltr_ctrl;
97 u32 cc_carr_wss_carr;
98 u32 c_phase;
99 u32 gain_u;
100 u32 gain_v;
101 u32 gain_y;
102 u32 black_level;
103 u32 blank_level;
104 u32 x_color;
105 u32 m_control;
106 u32 bstamp_wss_data;
107 u32 s_carr;
108 u32 line21;
109 u32 ln_sel;
110 u32 l21__wc_ctl;
111 u32 htrigger_vtrigger;
112 u32 savid__eavid;
113 u32 flen__fal;
114 u32 lal__phase_reset;
115 u32 hs_int_start_stop_x;
116 u32 hs_ext_start_stop_x;
117 u32 vs_int_start_x;
118 u32 vs_int_stop_x__vs_int_start_y;
119 u32 vs_int_stop_y__vs_ext_start_x;
120 u32 vs_ext_stop_x__vs_ext_start_y;
121 u32 vs_ext_stop_y;
122 u32 avid_start_stop_x;
123 u32 avid_start_stop_y;
124 u32 fid_int_start_x__fid_int_start_y;
125 u32 fid_int_offset_y__fid_ext_start_x;
126 u32 fid_ext_start_y__fid_ext_offset_y;
127 u32 tvdetgp_int_start_stop_x;
128 u32 tvdetgp_int_start_stop_y;
129 u32 gen_ctrl;
130};
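/*
 * One value per VENC_* register above; venc_write_config() copies a
 * chosen configuration into the hardware register by register (wss_data
 * is OR'd into bstamp_wss_data on the way).
 */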
131
132/* from TRM */
133static const struct venc_config venc_config_pal_trm = {
134 .f_control = 0,
135 .vidout_ctrl = 1,
136 .sync_ctrl = 0x40,
137 .llen = 0x35F, /* 863 */
138 .flens = 0x270, /* 624 */
139 .hfltr_ctrl = 0,
140 .cc_carr_wss_carr = 0x2F7225ED,
141 .c_phase = 0,
142 .gain_u = 0x111,
143 .gain_v = 0x181,
144 .gain_y = 0x140,
145 .black_level = 0x3B,
146 .blank_level = 0x3B,
147 .x_color = 0x7,
148 .m_control = 0x2,
149 .bstamp_wss_data = 0x3F,
150 .s_carr = 0x2A098ACB,
151 .line21 = 0,
152 .ln_sel = 0x01290015,
153 .l21__wc_ctl = 0x0000F603,
154 .htrigger_vtrigger = 0,
155
156 .savid__eavid = 0x06A70108,
157 .flen__fal = 0x00180270,
158 .lal__phase_reset = 0x00040135,
159 .hs_int_start_stop_x = 0x00880358,
160 .hs_ext_start_stop_x = 0x000F035F,
161 .vs_int_start_x = 0x01A70000,
162 .vs_int_stop_x__vs_int_start_y = 0x000001A7,
163 .vs_int_stop_y__vs_ext_start_x = 0x01AF0000,
164 .vs_ext_stop_x__vs_ext_start_y = 0x000101AF,
165 .vs_ext_stop_y = 0x00000025,
166 .avid_start_stop_x = 0x03530083,
167 .avid_start_stop_y = 0x026C002E,
168 .fid_int_start_x__fid_int_start_y = 0x0001008A,
169 .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
170 .fid_ext_start_y__fid_ext_offset_y = 0x01380001,
171
172 .tvdetgp_int_start_stop_x = 0x00140001,
173 .tvdetgp_int_start_stop_y = 0x00010001,
174 .gen_ctrl = 0x00FF0000,
175};
176
177/* from TRM */
178static const struct venc_config venc_config_ntsc_trm = {
179 .f_control = 0,
180 .vidout_ctrl = 1,
181 .sync_ctrl = 0x8040,
182 .llen = 0x359,
183 .flens = 0x20C,
184 .hfltr_ctrl = 0,
185 .cc_carr_wss_carr = 0x043F2631,
186 .c_phase = 0,
187 .gain_u = 0x102,
188 .gain_v = 0x16C,
189 .gain_y = 0x12F,
190 .black_level = 0x43,
191 .blank_level = 0x38,
192 .x_color = 0x7,
193 .m_control = 0x1,
194 .bstamp_wss_data = 0x38,
195 .s_carr = 0x21F07C1F,
196 .line21 = 0,
197 .ln_sel = 0x01310011,
198 .l21__wc_ctl = 0x0000F003,
199 .htrigger_vtrigger = 0,
200
201 .savid__eavid = 0x069300F4,
202 .flen__fal = 0x0016020C,
203 .lal__phase_reset = 0x00060107,
204 .hs_int_start_stop_x = 0x008E0350,
205 .hs_ext_start_stop_x = 0x000F0359,
206 .vs_int_start_x = 0x01A00000,
207 .vs_int_stop_x__vs_int_start_y = 0x020701A0,
208 .vs_int_stop_y__vs_ext_start_x = 0x01AC0024,
209 .vs_ext_stop_x__vs_ext_start_y = 0x020D01AC,
210 .vs_ext_stop_y = 0x00000006,
211 .avid_start_stop_x = 0x03480078,
212 .avid_start_stop_y = 0x02060024,
213 .fid_int_start_x__fid_int_start_y = 0x0001008A,
214 .fid_int_offset_y__fid_ext_start_x = 0x01AC0106,
215 .fid_ext_start_y__fid_ext_offset_y = 0x01060006,
216
217 .tvdetgp_int_start_stop_x = 0x00140001,
218 .tvdetgp_int_start_stop_y = 0x00010001,
219 .gen_ctrl = 0x00F90000,
220};
221
222static const struct venc_config venc_config_pal_bdghi = {
223 .f_control = 0,
224 .vidout_ctrl = 0,
225 .sync_ctrl = 0,
226 .hfltr_ctrl = 0,
227 .x_color = 0,
228 .line21 = 0,
229 .ln_sel = 21,
230 .htrigger_vtrigger = 0,
231 .tvdetgp_int_start_stop_x = 0x00140001,
232 .tvdetgp_int_start_stop_y = 0x00010001,
233 .gen_ctrl = 0x00FB0000,
234
235 .llen = 864-1,
236 .flens = 625-1,
237 .cc_carr_wss_carr = 0x2F7625ED,
238 .c_phase = 0xDF,
239 .gain_u = 0x111,
240 .gain_v = 0x181,
241 .gain_y = 0x140,
242 .black_level = 0x3e,
243 .blank_level = 0x3e,
244 .m_control = 0<<2 | 1<<1,
245 .bstamp_wss_data = 0x42,
246 .s_carr = 0x2a098acb,
247 .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
248 .savid__eavid = 0x06A70108,
249 .flen__fal = 23<<16 | 624<<0,
250 .lal__phase_reset = 2<<17 | 310<<0,
251 .hs_int_start_stop_x = 0x00920358,
252 .hs_ext_start_stop_x = 0x000F035F,
253 .vs_int_start_x = 0x1a7<<16,
254 .vs_int_stop_x__vs_int_start_y = 0x000601A7,
255 .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
256 .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
257 .vs_ext_stop_y = 0x05,
258 .avid_start_stop_x = 0x03530082,
259 .avid_start_stop_y = 0x0270002E,
260 .fid_int_start_x__fid_int_start_y = 0x0005008A,
261 .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
262 .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
263};
264
265const struct omap_video_timings omap_dss_pal_timings = {
266 .x_res = 720,
267 .y_res = 574,
268 .pixel_clock = 13500,
269 .hsw = 64,
270 .hfp = 12,
271 .hbp = 68,
272 .vsw = 5,
273 .vfp = 5,
274 .vbp = 41,
275};
276EXPORT_SYMBOL(omap_dss_pal_timings);
277
278const struct omap_video_timings omap_dss_ntsc_timings = {
279 .x_res = 720,
280 .y_res = 482,
281 .pixel_clock = 13500,
282 .hsw = 64,
283 .hfp = 16,
284 .hbp = 58,
285 .vsw = 6,
286 .vfp = 6,
287 .vbp = 31,
288};
289EXPORT_SYMBOL(omap_dss_ntsc_timings);
290
291static struct {
292 void __iomem *base;
293 struct mutex venc_lock;
294 u32 wss_data;
295 struct regulator *vdda_dac_reg;
296} venc;
297
298static inline void venc_write_reg(int idx, u32 val)
299{
300 __raw_writel(val, venc.base + idx);
301}
302
303static inline u32 venc_read_reg(int idx)
304{
305 u32 l = __raw_readl(venc.base + idx);
306 return l;
307}
308
309static void venc_write_config(const struct venc_config *config)
310{
311 DSSDBG("write venc conf\n");
312
313 venc_write_reg(VENC_LLEN, config->llen);
314 venc_write_reg(VENC_FLENS, config->flens);
315 venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
316 venc_write_reg(VENC_C_PHASE, config->c_phase);
317 venc_write_reg(VENC_GAIN_U, config->gain_u);
318 venc_write_reg(VENC_GAIN_V, config->gain_v);
319 venc_write_reg(VENC_GAIN_Y, config->gain_y);
320 venc_write_reg(VENC_BLACK_LEVEL, config->black_level);
321 venc_write_reg(VENC_BLANK_LEVEL, config->blank_level);
322 venc_write_reg(VENC_M_CONTROL, config->m_control);
323 venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
324 venc.wss_data);
325 venc_write_reg(VENC_S_CARR, config->s_carr);
326 venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl);
327 venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid);
328 venc_write_reg(VENC_FLEN__FAL, config->flen__fal);
329 venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset);
330 venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x);
331 venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x);
332 venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x);
333 venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y,
334 config->vs_int_stop_x__vs_int_start_y);
335 venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X,
336 config->vs_int_stop_y__vs_ext_start_x);
337 venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
338 config->vs_ext_stop_x__vs_ext_start_y);
339 venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
340 venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x);
341 venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
342 venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y,
343 config->fid_int_start_x__fid_int_start_y);
344 venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
345 config->fid_int_offset_y__fid_ext_start_x);
346 venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
347 config->fid_ext_start_y__fid_ext_offset_y);
348
349 venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C));
350 venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl);
351 venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl);
352 venc_write_reg(VENC_X_COLOR, config->x_color);
353 venc_write_reg(VENC_LINE21, config->line21);
354 venc_write_reg(VENC_LN_SEL, config->ln_sel);
355 venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
356 venc_write_reg(VENC_TVDETGP_INT_START_STOP_X,
357 config->tvdetgp_int_start_stop_x);
358 venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y,
359 config->tvdetgp_int_start_stop_y);
360 venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl);
361 venc_write_reg(VENC_F_CONTROL, config->f_control);
362 venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl);
363}
364
365static void venc_reset(void)
366{
367 int t = 1000;
368
369 venc_write_reg(VENC_F_CONTROL, 1<<8);
370 while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) {
371 if (--t == 0) {
372 DSSERR("Failed to reset venc\n");
373 return;
374 }
375 }
376
377 /* the magical sleep that makes things work */
378 msleep(20);
379}
380
381static void venc_enable_clocks(int enable)
382{
383 if (enable)
384 dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
385 DSS_CLK_96M);
386 else
387 dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1 | DSS_CLK_54M |
388 DSS_CLK_96M);
389}
390
391static const struct venc_config *venc_timings_to_config(
392 struct omap_video_timings *timings)
393{
394 if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
395 return &venc_config_pal_trm;
396
397 if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
398 return &venc_config_ntsc_trm;
399
400	BUG();
	return NULL;	/* unreachable; avoids a non-void return warning */
401}
402
403
404
405
406
407/* driver */
408static int venc_panel_probe(struct omap_dss_device *dssdev)
409{
410 dssdev->panel.timings = omap_dss_pal_timings;
411
412 return 0;
413}
414
415static void venc_panel_remove(struct omap_dss_device *dssdev)
416{
417}
418
419static int venc_panel_enable(struct omap_dss_device *dssdev)
420{
421 int r = 0;
422
423	/* wait a couple of vsyncs before enabling the LCD */
424 msleep(50);
425
426 if (dssdev->platform_enable)
427 r = dssdev->platform_enable(dssdev);
428
429 return r;
430}
431
432static void venc_panel_disable(struct omap_dss_device *dssdev)
433{
434 if (dssdev->platform_disable)
435 dssdev->platform_disable(dssdev);
436
437 /* wait at least 5 vsyncs after disabling the LCD */
438
439 msleep(100);
440}
441
442static int venc_panel_suspend(struct omap_dss_device *dssdev)
443{
444 venc_panel_disable(dssdev);
445 return 0;
446}
447
448static int venc_panel_resume(struct omap_dss_device *dssdev)
449{
450 return venc_panel_enable(dssdev);
451}
452
453static struct omap_dss_driver venc_driver = {
454 .probe = venc_panel_probe,
455 .remove = venc_panel_remove,
456
457 .enable = venc_panel_enable,
458 .disable = venc_panel_disable,
459 .suspend = venc_panel_suspend,
460 .resume = venc_panel_resume,
461
462 .driver = {
463 .name = "venc",
464 .owner = THIS_MODULE,
465 },
466};
467/* driver end */
468
469
470
471int venc_init(struct platform_device *pdev)
472{
473 u8 rev_id;
474
475 mutex_init(&venc.venc_lock);
476
477 venc.wss_data = 0;
478
479 venc.base = ioremap(VENC_BASE, SZ_1K);
480 if (!venc.base) {
481 DSSERR("can't ioremap VENC\n");
482 return -ENOMEM;
483 }
484
485 venc.vdda_dac_reg = regulator_get(&pdev->dev, "vdda_dac");
486 if (IS_ERR(venc.vdda_dac_reg)) {
487 iounmap(venc.base);
488 DSSERR("can't get VDDA_DAC regulator\n");
489 return PTR_ERR(venc.vdda_dac_reg);
490 }
491
492 venc_enable_clocks(1);
493
494 rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
495 printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
496
497 venc_enable_clocks(0);
498
499 return omap_dss_register_driver(&venc_driver);
500}
501
502void venc_exit(void)
503{
504 omap_dss_unregister_driver(&venc_driver);
505
506 regulator_put(venc.vdda_dac_reg);
507
508 iounmap(venc.base);
509}
510
511static void venc_power_on(struct omap_dss_device *dssdev)
512{
513 u32 l;
514
515 venc_enable_clocks(1);
516
517 venc_reset();
518 venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
519
520 dss_set_venc_output(dssdev->phy.venc.type);
521 dss_set_dac_pwrdn_bgz(1);
522
523 l = 0;
524
525 if (dssdev->phy.venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE)
526 l |= 1 << 1;
527 else /* S-Video */
528 l |= (1 << 0) | (1 << 2);
529
530 if (dssdev->phy.venc.invert_polarity == false)
531 l |= 1 << 3;
532
533 venc_write_reg(VENC_OUTPUT_CONTROL, l);
534
535 dispc_set_digit_size(dssdev->panel.timings.x_res,
536 dssdev->panel.timings.y_res/2);
537
538 regulator_enable(venc.vdda_dac_reg);
539
540 if (dssdev->platform_enable)
541 dssdev->platform_enable(dssdev);
542
543 dispc_enable_digit_out(1);
544}
545
546static void venc_power_off(struct omap_dss_device *dssdev)
547{
548 venc_write_reg(VENC_OUTPUT_CONTROL, 0);
549 dss_set_dac_pwrdn_bgz(0);
550
551 dispc_enable_digit_out(0);
552
553 if (dssdev->platform_disable)
554 dssdev->platform_disable(dssdev);
555
556 regulator_disable(venc.vdda_dac_reg);
557
558 venc_enable_clocks(0);
559}
560
561static int venc_enable_display(struct omap_dss_device *dssdev)
562{
563 int r = 0;
564
565 DSSDBG("venc_enable_display\n");
566
567 mutex_lock(&venc.venc_lock);
568
569 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
570 r = -EINVAL;
571 goto err;
572 }
573
574 venc_power_on(dssdev);
575
576 venc.wss_data = 0;
577
578 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
579err:
580 mutex_unlock(&venc.venc_lock);
581
582 return r;
583}
584
585static void venc_disable_display(struct omap_dss_device *dssdev)
586{
587 DSSDBG("venc_disable_display\n");
588
589 mutex_lock(&venc.venc_lock);
590
591 if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
592 goto end;
593
594 if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) {
595 /* suspended is the same as disabled with venc */
596 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
597 goto end;
598 }
599
600 venc_power_off(dssdev);
601
602 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
603end:
604 mutex_unlock(&venc.venc_lock);
605}
606
607static int venc_display_suspend(struct omap_dss_device *dssdev)
608{
609 int r = 0;
610
611 DSSDBG("venc_display_suspend\n");
612
613 mutex_lock(&venc.venc_lock);
614
615 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
616 r = -EINVAL;
617 goto err;
618 }
619
620 venc_power_off(dssdev);
621
622 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
623err:
624 mutex_unlock(&venc.venc_lock);
625
626 return r;
627}
628
629static int venc_display_resume(struct omap_dss_device *dssdev)
630{
631 int r = 0;
632
633 DSSDBG("venc_display_resume\n");
634
635 mutex_lock(&venc.venc_lock);
636
637 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
638 r = -EINVAL;
639 goto err;
640 }
641
642 venc_power_on(dssdev);
643
644 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
645err:
646 mutex_unlock(&venc.venc_lock);
647
648 return r;
649}
650
651static void venc_get_timings(struct omap_dss_device *dssdev,
652 struct omap_video_timings *timings)
653{
654 *timings = dssdev->panel.timings;
655}
656
657static void venc_set_timings(struct omap_dss_device *dssdev,
658 struct omap_video_timings *timings)
659{
660 DSSDBG("venc_set_timings\n");
661
662 /* Reset WSS data when the TV standard changes. */
663 if (memcmp(&dssdev->panel.timings, timings, sizeof(*timings)))
664 venc.wss_data = 0;
665
666 dssdev->panel.timings = *timings;
667 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
668		/* restart the VENC so that the new timings take effect */
669 venc_disable_display(dssdev);
670 venc_enable_display(dssdev);
671 }
672}
673
674static int venc_check_timings(struct omap_dss_device *dssdev,
675 struct omap_video_timings *timings)
676{
677 DSSDBG("venc_check_timings\n");
678
679 if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0)
680 return 0;
681
682 if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0)
683 return 0;
684
685 return -EINVAL;
686}
687
688static u32 venc_get_wss(struct omap_dss_device *dssdev)
689{
690 /* Invert due to VENC_L21_WC_CTL:INV=1 */
691 return (venc.wss_data >> 8) ^ 0xfffff;
692}
693
694static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
695{
696 const struct venc_config *config;
697
698 DSSDBG("venc_set_wss\n");
699
700 mutex_lock(&venc.venc_lock);
701
702 config = venc_timings_to_config(&dssdev->panel.timings);
703
704 /* Invert due to VENC_L21_WC_CTL:INV=1 */
705 venc.wss_data = (wss ^ 0xfffff) << 8;
706
707 venc_enable_clocks(1);
708
709 venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
710 venc.wss_data);
711
712 venc_enable_clocks(0);
713
714 mutex_unlock(&venc.venc_lock);
715
716 return 0;
717}
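/*
 * A worked example of the inversion above (the sample WSS value is
 * hypothetical). With wss = 0x00008:
 *
 *   venc_set_wss():  venc.wss_data = (0x00008 ^ 0xfffff) << 8 = 0xffff700
 *   venc_get_wss():  (0xffff700 >> 8) ^ 0xfffff              = 0x00008
 *
 * so callers get back exactly the value they set; the register-level
 * inversion required by VENC_L21_WC_CTL:INV=1 stays hidden in these
 * two helpers.
 */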
718
719static enum omap_dss_update_mode venc_display_get_update_mode(
720 struct omap_dss_device *dssdev)
721{
722 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
723 return OMAP_DSS_UPDATE_AUTO;
724 else
725 return OMAP_DSS_UPDATE_DISABLED;
726}
727
728int venc_init_display(struct omap_dss_device *dssdev)
729{
730 DSSDBG("init_display\n");
731
732 dssdev->enable = venc_enable_display;
733 dssdev->disable = venc_disable_display;
734 dssdev->suspend = venc_display_suspend;
735 dssdev->resume = venc_display_resume;
736 dssdev->get_timings = venc_get_timings;
737 dssdev->set_timings = venc_set_timings;
738 dssdev->check_timings = venc_check_timings;
739 dssdev->get_wss = venc_get_wss;
740 dssdev->set_wss = venc_set_wss;
741 dssdev->get_update_mode = venc_display_get_update_mode;
742
743 return 0;
744}
745
746void venc_dump_regs(struct seq_file *s)
747{
748#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
749
750 venc_enable_clocks(1);
751
752 DUMPREG(VENC_F_CONTROL);
753 DUMPREG(VENC_VIDOUT_CTRL);
754 DUMPREG(VENC_SYNC_CTRL);
755 DUMPREG(VENC_LLEN);
756 DUMPREG(VENC_FLENS);
757 DUMPREG(VENC_HFLTR_CTRL);
758 DUMPREG(VENC_CC_CARR_WSS_CARR);
759 DUMPREG(VENC_C_PHASE);
760 DUMPREG(VENC_GAIN_U);
761 DUMPREG(VENC_GAIN_V);
762 DUMPREG(VENC_GAIN_Y);
763 DUMPREG(VENC_BLACK_LEVEL);
764 DUMPREG(VENC_BLANK_LEVEL);
765 DUMPREG(VENC_X_COLOR);
766 DUMPREG(VENC_M_CONTROL);
767 DUMPREG(VENC_BSTAMP_WSS_DATA);
768 DUMPREG(VENC_S_CARR);
769 DUMPREG(VENC_LINE21);
770 DUMPREG(VENC_LN_SEL);
771 DUMPREG(VENC_L21__WC_CTL);
772 DUMPREG(VENC_HTRIGGER_VTRIGGER);
773 DUMPREG(VENC_SAVID__EAVID);
774 DUMPREG(VENC_FLEN__FAL);
775 DUMPREG(VENC_LAL__PHASE_RESET);
776 DUMPREG(VENC_HS_INT_START_STOP_X);
777 DUMPREG(VENC_HS_EXT_START_STOP_X);
778 DUMPREG(VENC_VS_INT_START_X);
779 DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y);
780 DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X);
781 DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
782 DUMPREG(VENC_VS_EXT_STOP_Y);
783 DUMPREG(VENC_AVID_START_STOP_X);
784 DUMPREG(VENC_AVID_START_STOP_Y);
785 DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y);
786 DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
787 DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
788 DUMPREG(VENC_TVDETGP_INT_START_STOP_X);
789 DUMPREG(VENC_TVDETGP_INT_START_STOP_Y);
790 DUMPREG(VENC_GEN_CTRL);
791 DUMPREG(VENC_OUTPUT_CONTROL);
792 DUMPREG(VENC_OUTPUT_TEST);
793
794 venc_enable_clocks(0);
795
796#undef DUMPREG
797}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
new file mode 100644
index 000000000000..bb694cc52a50
--- /dev/null
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -0,0 +1,37 @@
1menuconfig FB_OMAP2
2 tristate "OMAP2/3 frame buffer support (EXPERIMENTAL)"
3 depends on FB && OMAP2_DSS
4
5 select OMAP2_VRAM
6 select OMAP2_VRFB
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 help
11 Frame buffer driver for OMAP2/3 based boards.
12
13config FB_OMAP2_DEBUG_SUPPORT
14 bool "Debug support for OMAP2/3 FB"
15 default y
16 depends on FB_OMAP2
17 help
18	  Support for debug output. You still have to enable the actual
19	  printing with the 'debug' module parameter.
20
21config FB_OMAP2_FORCE_AUTO_UPDATE
22 bool "Force main display to automatic update mode"
23 depends on FB_OMAP2
24 help
25	  Forces the main display to automatic update mode (if possible),
26	  and also enables tearsync (if possible). By default,
27	  displays that support manual update are started in manual
28	  update mode.
29
30config FB_OMAP2_NUM_FBS
31 int "Number of framebuffers"
32 range 1 10
33 default 3
34 depends on FB_OMAP2
35 help
36	  Select the number of framebuffers created. OMAP2/3 has 3 overlays,
37	  so normally this would be 3.
diff --git a/drivers/video/omap2/omapfb/Makefile b/drivers/video/omap2/omapfb/Makefile
new file mode 100644
index 000000000000..51c2e00d9bf8
--- /dev/null
+++ b/drivers/video/omap2/omapfb/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_FB_OMAP2) += omapfb.o
2omapfb-y := omapfb-main.o omapfb-sysfs.o omapfb-ioctl.o
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
new file mode 100644
index 000000000000..4c4bafdfaa43
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -0,0 +1,755 @@
1/*
2 * linux/drivers/video/omap2/omapfb-ioctl.c
3 *
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/fb.h>
24#include <linux/device.h>
25#include <linux/uaccess.h>
26#include <linux/platform_device.h>
27#include <linux/mm.h>
28#include <linux/omapfb.h>
29#include <linux/vmalloc.h>
30
31#include <plat/display.h>
32#include <plat/vrfb.h>
33#include <plat/vram.h>
34
35#include "omapfb.h"
36
37static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
38{
39 struct omapfb_info *ofbi = FB2OFB(fbi);
40 struct omapfb2_device *fbdev = ofbi->fbdev;
41 struct omap_overlay *ovl;
42 struct omap_overlay_info info;
43 int r = 0;
44
45 DBG("omapfb_setup_plane\n");
46
47 if (ofbi->num_overlays != 1) {
48 r = -EINVAL;
49 goto out;
50 }
51
52 /* XXX uses only the first overlay */
53 ovl = ofbi->overlays[0];
54
55 if (pi->enabled && !ofbi->region.size) {
56 /*
57 * This plane's memory was freed, can't enable it
58 * until it's reallocated.
59 */
60 r = -EINVAL;
61 goto out;
62 }
63
64 ovl->get_overlay_info(ovl, &info);
65
66 info.pos_x = pi->pos_x;
67 info.pos_y = pi->pos_y;
68 info.out_width = pi->out_width;
69 info.out_height = pi->out_height;
70 info.enabled = pi->enabled;
71
72 r = ovl->set_overlay_info(ovl, &info);
73 if (r)
74 goto out;
75
76 if (ovl->manager) {
77 r = ovl->manager->apply(ovl->manager);
78 if (r)
79 goto out;
80 }
81
82out:
83 if (r)
84 dev_err(fbdev->dev, "setup_plane failed\n");
85 return r;
86}
87
88static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
89{
90 struct omapfb_info *ofbi = FB2OFB(fbi);
91
92 if (ofbi->num_overlays != 1) {
93 memset(pi, 0, sizeof(*pi));
94 } else {
95 struct omap_overlay_info *ovli;
96 struct omap_overlay *ovl;
97
98 ovl = ofbi->overlays[0];
99 ovli = &ovl->info;
100
101 pi->pos_x = ovli->pos_x;
102 pi->pos_y = ovli->pos_y;
103 pi->enabled = ovli->enabled;
104 pi->channel_out = 0; /* xxx */
105 pi->mirror = 0;
106 pi->out_width = ovli->out_width;
107 pi->out_height = ovli->out_height;
108 }
109
110 return 0;
111}
112
113static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
114{
115 struct omapfb_info *ofbi = FB2OFB(fbi);
116 struct omapfb2_device *fbdev = ofbi->fbdev;
117 struct omapfb2_mem_region *rg;
118 int r, i;
119 size_t size;
120
121 if (mi->type > OMAPFB_MEMTYPE_MAX)
122 return -EINVAL;
123
124 size = PAGE_ALIGN(mi->size);
125
126 rg = &ofbi->region;
127
128 for (i = 0; i < ofbi->num_overlays; i++) {
129 if (ofbi->overlays[i]->info.enabled)
130 return -EBUSY;
131 }
132
133 if (rg->size != size || rg->type != mi->type) {
134 r = omapfb_realloc_fbmem(fbi, size, mi->type);
135 if (r) {
136 dev_err(fbdev->dev, "realloc fbmem failed\n");
137 return r;
138 }
139 }
140
141 return 0;
142}
143
144static int omapfb_query_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
145{
146 struct omapfb_info *ofbi = FB2OFB(fbi);
147 struct omapfb2_mem_region *rg;
148
149 rg = &ofbi->region;
150 memset(mi, 0, sizeof(*mi));
151
152 mi->size = rg->size;
153 mi->type = rg->type;
154
155 return 0;
156}
157
158static int omapfb_update_window_nolock(struct fb_info *fbi,
159 u32 x, u32 y, u32 w, u32 h)
160{
161 struct omap_dss_device *display = fb2display(fbi);
162 u16 dw, dh;
163
164 if (!display)
165 return 0;
166
167 if (w == 0 || h == 0)
168 return 0;
169
170 display->get_resolution(display, &dw, &dh);
171
172 if (x + w > dw || y + h > dh)
173 return -EINVAL;
174
175 return display->update(display, x, y, w, h);
176}
177
178/* This function is exported for SGX driver use */
179int omapfb_update_window(struct fb_info *fbi,
180 u32 x, u32 y, u32 w, u32 h)
181{
182 struct omapfb_info *ofbi = FB2OFB(fbi);
183 struct omapfb2_device *fbdev = ofbi->fbdev;
184 int r;
185
186 omapfb_lock(fbdev);
187 lock_fb_info(fbi);
188
189 r = omapfb_update_window_nolock(fbi, x, y, w, h);
190
191 unlock_fb_info(fbi);
192 omapfb_unlock(fbdev);
193
194 return r;
195}
196EXPORT_SYMBOL(omapfb_update_window);
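/*
 * A minimal sketch of an external in-kernel caller of the exported symbol
 * above, e.g. the SGX driver mentioned in the comment. The function name
 * and the dirty rectangle are hypothetical.
 */
static void example_flush_dirty_rect(struct fb_info *fbi)
{
	int r;

	/* push a 64x64 region at (0,0) out to a manual-update display */
	r = omapfb_update_window(fbi, 0, 0, 64, 64);
	if (r)
		pr_err("omapfb_update_window failed: %d\n", r);
}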
197
198static int omapfb_set_update_mode(struct fb_info *fbi,
199 enum omapfb_update_mode mode)
200{
201 struct omap_dss_device *display = fb2display(fbi);
202 enum omap_dss_update_mode um;
203 int r;
204
205 if (!display || !display->set_update_mode)
206 return -EINVAL;
207
208 switch (mode) {
209 case OMAPFB_UPDATE_DISABLED:
210 um = OMAP_DSS_UPDATE_DISABLED;
211 break;
212
213 case OMAPFB_AUTO_UPDATE:
214 um = OMAP_DSS_UPDATE_AUTO;
215 break;
216
217 case OMAPFB_MANUAL_UPDATE:
218 um = OMAP_DSS_UPDATE_MANUAL;
219 break;
220
221 default:
222 return -EINVAL;
223 }
224
225 r = display->set_update_mode(display, um);
226
227 return r;
228}
229
230static int omapfb_get_update_mode(struct fb_info *fbi,
231 enum omapfb_update_mode *mode)
232{
233 struct omap_dss_device *display = fb2display(fbi);
234 enum omap_dss_update_mode m;
235
236 if (!display || !display->get_update_mode)
237 return -EINVAL;
238
239 m = display->get_update_mode(display);
240
241 switch (m) {
242 case OMAP_DSS_UPDATE_DISABLED:
243 *mode = OMAPFB_UPDATE_DISABLED;
244 break;
245 case OMAP_DSS_UPDATE_AUTO:
246 *mode = OMAPFB_AUTO_UPDATE;
247 break;
248 case OMAP_DSS_UPDATE_MANUAL:
249 *mode = OMAPFB_MANUAL_UPDATE;
250 break;
251 default:
252 BUG();
253 }
254
255 return 0;
256}
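/*
 * A user-space sketch (not part of the driver) of driving the two helpers
 * above through their ioctls. Assumes <sys/ioctl.h> and <linux/omapfb.h>;
 * fbfd is an open /dev/fbN file descriptor.
 */
static int set_manual_update(int fbfd)
{
	enum omapfb_update_mode mode = OMAPFB_MANUAL_UPDATE;

	return ioctl(fbfd, OMAPFB_SET_UPDATE_MODE, &mode);
}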
257
258/* XXX this color key handling is a hack... */
259static struct omapfb_color_key omapfb_color_keys[2];
260
261static int _omapfb_set_color_key(struct omap_overlay_manager *mgr,
262 struct omapfb_color_key *ck)
263{
264 struct omap_overlay_manager_info info;
265 enum omap_dss_trans_key_type kt;
266 int r;
267
268 mgr->get_manager_info(mgr, &info);
269
270 if (ck->key_type == OMAPFB_COLOR_KEY_DISABLED) {
271 info.trans_enabled = false;
272 omapfb_color_keys[mgr->id] = *ck;
273
274 r = mgr->set_manager_info(mgr, &info);
275 if (r)
276 return r;
277
278 r = mgr->apply(mgr);
279
280 return r;
281 }
282
283 switch (ck->key_type) {
284 case OMAPFB_COLOR_KEY_GFX_DST:
285 kt = OMAP_DSS_COLOR_KEY_GFX_DST;
286 break;
287 case OMAPFB_COLOR_KEY_VID_SRC:
288 kt = OMAP_DSS_COLOR_KEY_VID_SRC;
289 break;
290 default:
291 return -EINVAL;
292 }
293
294 info.default_color = ck->background;
295 info.trans_key = ck->trans_key;
296 info.trans_key_type = kt;
297 info.trans_enabled = true;
298
299 omapfb_color_keys[mgr->id] = *ck;
300
301 r = mgr->set_manager_info(mgr, &info);
302 if (r)
303 return r;
304
305 r = mgr->apply(mgr);
306
307 return r;
308}
309
310static int omapfb_set_color_key(struct fb_info *fbi,
311 struct omapfb_color_key *ck)
312{
313 struct omapfb_info *ofbi = FB2OFB(fbi);
314 struct omapfb2_device *fbdev = ofbi->fbdev;
315 int r;
316 int i;
317 struct omap_overlay_manager *mgr = NULL;
318
319 omapfb_lock(fbdev);
320
321 for (i = 0; i < ofbi->num_overlays; i++) {
322 if (ofbi->overlays[i]->manager) {
323 mgr = ofbi->overlays[i]->manager;
324 break;
325 }
326 }
327
328 if (!mgr) {
329 r = -EINVAL;
330 goto err;
331 }
332
333 r = _omapfb_set_color_key(mgr, ck);
334err:
335 omapfb_unlock(fbdev);
336
337 return r;
338}
339
340static int omapfb_get_color_key(struct fb_info *fbi,
341 struct omapfb_color_key *ck)
342{
343 struct omapfb_info *ofbi = FB2OFB(fbi);
344 struct omapfb2_device *fbdev = ofbi->fbdev;
345 struct omap_overlay_manager *mgr = NULL;
346 int r = 0;
347 int i;
348
349 omapfb_lock(fbdev);
350
351 for (i = 0; i < ofbi->num_overlays; i++) {
352 if (ofbi->overlays[i]->manager) {
353 mgr = ofbi->overlays[i]->manager;
354 break;
355 }
356 }
357
358 if (!mgr) {
359 r = -EINVAL;
360 goto err;
361 }
362
363 *ck = omapfb_color_keys[mgr->id];
364err:
365 omapfb_unlock(fbdev);
366
367 return r;
368}
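/*
 * A user-space sketch (not part of the driver) of enabling graphics
 * destination color keying, as handled by _omapfb_set_color_key() above.
 * The magenta key value is hypothetical; assumes <sys/ioctl.h> and
 * <linux/omapfb.h>.
 */
static int enable_gfx_dst_key(int fbfd)
{
	struct omapfb_color_key ck = {
		.key_type  = OMAPFB_COLOR_KEY_GFX_DST,
		.trans_key = 0xf81f,	/* magenta in RGB565 */
	};

	return ioctl(fbfd, OMAPFB_SET_COLOR_KEY, &ck);
}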
369
370static int omapfb_memory_read(struct fb_info *fbi,
371 struct omapfb_memory_read *mr)
372{
373 struct omap_dss_device *display = fb2display(fbi);
374 void *buf;
375 int r;
376
377 if (!display || !display->memory_read)
378 return -ENOENT;
379
380 if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
381 return -EFAULT;
382
383 if (mr->w * mr->h * 3 > mr->buffer_size)
384 return -EINVAL;
385
386 buf = vmalloc(mr->buffer_size);
387 if (!buf) {
388 DBG("vmalloc failed\n");
389 return -ENOMEM;
390 }
391
392 r = display->memory_read(display, buf, mr->buffer_size,
393 mr->x, mr->y, mr->w, mr->h);
394
395 if (r > 0) {
396 if (copy_to_user(mr->buffer, buf, mr->buffer_size))
397 r = -EFAULT;
398 }
399
400 vfree(buf);
401
402 return r;
403}
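/*
 * A user-space sketch (not part of the driver) of OMAPFB_MEMORY_READ.
 * Note the size check above: the driver expects room for w * h * 3 bytes,
 * i.e. a 24-bit RGB readback. The region is hypothetical.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/omapfb.h>

static int read_back_region(int fbfd)
{
	struct omapfb_memory_read mr = {
		.x = 0, .y = 0, .w = 32, .h = 32,
		.buffer_size = 32 * 32 * 3,
	};
	int r;

	mr.buffer = malloc(mr.buffer_size);
	if (!mr.buffer)
		return -1;
	r = ioctl(fbfd, OMAPFB_MEMORY_READ, &mr);
	free(mr.buffer);
	return r;
}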
404
405static int omapfb_get_ovl_colormode(struct omapfb2_device *fbdev,
406 struct omapfb_ovl_colormode *mode)
407{
408 int ovl_idx = mode->overlay_idx;
409 int mode_idx = mode->mode_idx;
410 struct omap_overlay *ovl;
411 enum omap_color_mode supported_modes;
412 struct fb_var_screeninfo var;
413 int i;
414
415 if (ovl_idx >= fbdev->num_overlays)
416 return -ENODEV;
417 ovl = fbdev->overlays[ovl_idx];
418 supported_modes = ovl->supported_modes;
419
420 mode_idx = mode->mode_idx;
421
422 for (i = 0; i < sizeof(supported_modes) * 8; i++) {
423 if (!(supported_modes & (1 << i)))
424 continue;
425		/*
426		 * The FB layer may not support a mode even though the
427		 * overlay does, so filter such modes out with
428		 * dss_mode_to_fb_mode() here.
429		 */
430 if (dss_mode_to_fb_mode(1 << i, &var) < 0)
431 continue;
432
433 mode_idx--;
434 if (mode_idx < 0)
435 break;
436 }
437
438 if (i == sizeof(supported_modes) * 8)
439 return -ENOENT;
440
441 mode->bits_per_pixel = var.bits_per_pixel;
442 mode->nonstd = var.nonstd;
443 mode->red = var.red;
444 mode->green = var.green;
445 mode->blue = var.blue;
446 mode->transp = var.transp;
447
448 return 0;
449}
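/*
 * A user-space sketch (not part of the driver) of enumerating an overlay's
 * color modes the way the mode_idx loop above intends: step mode_idx until
 * the ioctl fails (ENOENT once the modes are exhausted). Assumes <stdio.h>,
 * <sys/ioctl.h> and <linux/omapfb.h>.
 */
static void list_overlay_modes(int fbfd)
{
	struct omapfb_ovl_colormode m = { .overlay_idx = 0 };
	int i;

	for (i = 0; ; i++) {
		m.mode_idx = i;
		if (ioctl(fbfd, OMAPFB_GET_OVERLAY_COLORMODE, &m) < 0)
			break;	/* no more modes (or error) */
		printf("mode %d: %u bpp, nonstd %u\n",
		       i, m.bits_per_pixel, m.nonstd);
	}
}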
450
451static int omapfb_wait_for_go(struct fb_info *fbi)
452{
453 struct omapfb_info *ofbi = FB2OFB(fbi);
454 int r = 0;
455 int i;
456
457 for (i = 0; i < ofbi->num_overlays; ++i) {
458 struct omap_overlay *ovl = ofbi->overlays[i];
459 r = ovl->wait_for_go(ovl);
460 if (r)
461 break;
462 }
463
464 return r;
465}
466
467int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
468{
469 struct omapfb_info *ofbi = FB2OFB(fbi);
470 struct omapfb2_device *fbdev = ofbi->fbdev;
471 struct omap_dss_device *display = fb2display(fbi);
472
473 union {
474 struct omapfb_update_window_old uwnd_o;
475 struct omapfb_update_window uwnd;
476 struct omapfb_plane_info plane_info;
477 struct omapfb_caps caps;
478 struct omapfb_mem_info mem_info;
479 struct omapfb_color_key color_key;
480 struct omapfb_ovl_colormode ovl_colormode;
481 enum omapfb_update_mode update_mode;
482 int test_num;
483 struct omapfb_memory_read memory_read;
484 struct omapfb_vram_info vram_info;
485 struct omapfb_tearsync_info tearsync_info;
486 } p;
487
488 int r = 0;
489
490 switch (cmd) {
491 case OMAPFB_SYNC_GFX:
492 DBG("ioctl SYNC_GFX\n");
493 if (!display || !display->sync) {
494			/* DSS1 never returns an error here, so neither do we */
495 /*r = -EINVAL;*/
496 break;
497 }
498
499 r = display->sync(display);
500 break;
501
502 case OMAPFB_UPDATE_WINDOW_OLD:
503 DBG("ioctl UPDATE_WINDOW_OLD\n");
504 if (!display || !display->update) {
505 r = -EINVAL;
506 break;
507 }
508
509 if (copy_from_user(&p.uwnd_o,
510 (void __user *)arg,
511 sizeof(p.uwnd_o))) {
512 r = -EFAULT;
513 break;
514 }
515
516 r = omapfb_update_window_nolock(fbi, p.uwnd_o.x, p.uwnd_o.y,
517 p.uwnd_o.width, p.uwnd_o.height);
518 break;
519
520 case OMAPFB_UPDATE_WINDOW:
521 DBG("ioctl UPDATE_WINDOW\n");
522 if (!display || !display->update) {
523 r = -EINVAL;
524 break;
525 }
526
527 if (copy_from_user(&p.uwnd, (void __user *)arg,
528 sizeof(p.uwnd))) {
529 r = -EFAULT;
530 break;
531 }
532
533 r = omapfb_update_window_nolock(fbi, p.uwnd.x, p.uwnd.y,
534 p.uwnd.width, p.uwnd.height);
535 break;
536
537 case OMAPFB_SETUP_PLANE:
538 DBG("ioctl SETUP_PLANE\n");
539 if (copy_from_user(&p.plane_info, (void __user *)arg,
540 sizeof(p.plane_info)))
541 r = -EFAULT;
542 else
543 r = omapfb_setup_plane(fbi, &p.plane_info);
544 break;
545
546 case OMAPFB_QUERY_PLANE:
547 DBG("ioctl QUERY_PLANE\n");
548 r = omapfb_query_plane(fbi, &p.plane_info);
549 if (r < 0)
550 break;
551 if (copy_to_user((void __user *)arg, &p.plane_info,
552 sizeof(p.plane_info)))
553 r = -EFAULT;
554 break;
555
556 case OMAPFB_SETUP_MEM:
557 DBG("ioctl SETUP_MEM\n");
558 if (copy_from_user(&p.mem_info, (void __user *)arg,
559 sizeof(p.mem_info)))
560 r = -EFAULT;
561 else
562 r = omapfb_setup_mem(fbi, &p.mem_info);
563 break;
564
565 case OMAPFB_QUERY_MEM:
566 DBG("ioctl QUERY_MEM\n");
567 r = omapfb_query_mem(fbi, &p.mem_info);
568 if (r < 0)
569 break;
570 if (copy_to_user((void __user *)arg, &p.mem_info,
571 sizeof(p.mem_info)))
572 r = -EFAULT;
573 break;
574
575 case OMAPFB_GET_CAPS:
576 DBG("ioctl GET_CAPS\n");
577 if (!display) {
578 r = -EINVAL;
579 break;
580 }
581
582 memset(&p.caps, 0, sizeof(p.caps));
583 if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
584 p.caps.ctrl |= OMAPFB_CAPS_MANUAL_UPDATE;
585 if (display->caps & OMAP_DSS_DISPLAY_CAP_TEAR_ELIM)
586 p.caps.ctrl |= OMAPFB_CAPS_TEARSYNC;
587
588 if (copy_to_user((void __user *)arg, &p.caps, sizeof(p.caps)))
589 r = -EFAULT;
590 break;
591
592 case OMAPFB_GET_OVERLAY_COLORMODE:
593 DBG("ioctl GET_OVERLAY_COLORMODE\n");
594 if (copy_from_user(&p.ovl_colormode, (void __user *)arg,
595 sizeof(p.ovl_colormode))) {
596 r = -EFAULT;
597 break;
598 }
599 r = omapfb_get_ovl_colormode(fbdev, &p.ovl_colormode);
600 if (r < 0)
601 break;
602 if (copy_to_user((void __user *)arg, &p.ovl_colormode,
603 sizeof(p.ovl_colormode)))
604 r = -EFAULT;
605 break;
606
607 case OMAPFB_SET_UPDATE_MODE:
608 DBG("ioctl SET_UPDATE_MODE\n");
609 if (get_user(p.update_mode, (int __user *)arg))
610 r = -EFAULT;
611 else
612 r = omapfb_set_update_mode(fbi, p.update_mode);
613 break;
614
615 case OMAPFB_GET_UPDATE_MODE:
616 DBG("ioctl GET_UPDATE_MODE\n");
617 r = omapfb_get_update_mode(fbi, &p.update_mode);
618 if (r)
619 break;
620 if (put_user(p.update_mode,
621 (enum omapfb_update_mode __user *)arg))
622 r = -EFAULT;
623 break;
624
625 case OMAPFB_SET_COLOR_KEY:
626 DBG("ioctl SET_COLOR_KEY\n");
627 if (copy_from_user(&p.color_key, (void __user *)arg,
628 sizeof(p.color_key)))
629 r = -EFAULT;
630 else
631 r = omapfb_set_color_key(fbi, &p.color_key);
632 break;
633
634 case OMAPFB_GET_COLOR_KEY:
635 DBG("ioctl GET_COLOR_KEY\n");
636 r = omapfb_get_color_key(fbi, &p.color_key);
637 if (r)
638 break;
639 if (copy_to_user((void __user *)arg, &p.color_key,
640 sizeof(p.color_key)))
641 r = -EFAULT;
642 break;
643
644 case OMAPFB_WAITFORVSYNC:
645 DBG("ioctl WAITFORVSYNC\n");
646 if (!display) {
647 r = -EINVAL;
648 break;
649 }
650
651 r = display->wait_vsync(display);
652 break;
653
654 case OMAPFB_WAITFORGO:
655 DBG("ioctl WAITFORGO\n");
656 if (!display) {
657 r = -EINVAL;
658 break;
659 }
660
661 r = omapfb_wait_for_go(fbi);
662 break;
663
664 /* LCD and CTRL tests do the same thing for backward
665 * compatibility */
666 case OMAPFB_LCD_TEST:
667 DBG("ioctl LCD_TEST\n");
668 if (get_user(p.test_num, (int __user *)arg)) {
669 r = -EFAULT;
670 break;
671 }
672 if (!display || !display->run_test) {
673 r = -EINVAL;
674 break;
675 }
676
677 r = display->run_test(display, p.test_num);
678
679 break;
680
681 case OMAPFB_CTRL_TEST:
682 DBG("ioctl CTRL_TEST\n");
683 if (get_user(p.test_num, (int __user *)arg)) {
684 r = -EFAULT;
685 break;
686 }
687 if (!display || !display->run_test) {
688 r = -EINVAL;
689 break;
690 }
691
692 r = display->run_test(display, p.test_num);
693
694 break;
695
696 case OMAPFB_MEMORY_READ:
697 DBG("ioctl MEMORY_READ\n");
698
699 if (copy_from_user(&p.memory_read, (void __user *)arg,
700 sizeof(p.memory_read))) {
701 r = -EFAULT;
702 break;
703 }
704
705 r = omapfb_memory_read(fbi, &p.memory_read);
706
707 break;
708
709 case OMAPFB_GET_VRAM_INFO: {
710 unsigned long vram, free, largest;
711
712 DBG("ioctl GET_VRAM_INFO\n");
713
714 omap_vram_get_info(&vram, &free, &largest);
715 p.vram_info.total = vram;
716 p.vram_info.free = free;
717 p.vram_info.largest_free_block = largest;
718
719 if (copy_to_user((void __user *)arg, &p.vram_info,
720 sizeof(p.vram_info)))
721 r = -EFAULT;
722 break;
723 }
724
725 case OMAPFB_SET_TEARSYNC: {
726 DBG("ioctl SET_TEARSYNC\n");
727
728 if (copy_from_user(&p.tearsync_info, (void __user *)arg,
729 sizeof(p.tearsync_info))) {
730 r = -EFAULT;
731 break;
732 }
733
734 if (!display->enable_te) {
735 r = -ENODEV;
736 break;
737 }
738
739 r = display->enable_te(display, !!p.tearsync_info.enabled);
740
741 break;
742 }
743
744 default:
745 dev_err(fbdev->dev, "Unknown ioctl 0x%x\n", cmd);
746 r = -EINVAL;
747 }
748
749 if (r < 0)
750 DBG("ioctl failed: %d\n", r);
751
752 return r;
753}
754
755
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
new file mode 100644
index 000000000000..ef299839858a
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -0,0 +1,2261 @@
1/*
2 * linux/drivers/video/omap2/omapfb-main.c
3 *
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/module.h>
24#include <linux/delay.h>
25#include <linux/fb.h>
26#include <linux/dma-mapping.h>
27#include <linux/vmalloc.h>
28#include <linux/device.h>
29#include <linux/platform_device.h>
30#include <linux/omapfb.h>
31
32#include <plat/display.h>
33#include <plat/vram.h>
34#include <plat/vrfb.h>
35
36#include "omapfb.h"
37
38#define MODULE_NAME "omapfb"
39
40#define OMAPFB_PLANE_XRES_MIN 8
41#define OMAPFB_PLANE_YRES_MIN 8
42
43static char *def_mode;
44static char *def_vram;
45static int def_vrfb;
46static int def_rotate;
47static int def_mirror;
48
49#ifdef DEBUG
50unsigned int omapfb_debug;
51module_param_named(debug, omapfb_debug, bool, 0644);
52static unsigned int omapfb_test_pattern;
53module_param_named(test, omapfb_test_pattern, bool, 0644);
54#endif
55
56static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi);
57
58#ifdef DEBUG
59static void draw_pixel(struct fb_info *fbi, int x, int y, unsigned color)
60{
61 struct fb_var_screeninfo *var = &fbi->var;
62 struct fb_fix_screeninfo *fix = &fbi->fix;
63 void __iomem *addr = fbi->screen_base;
64 const unsigned bytespp = var->bits_per_pixel >> 3;
65 const unsigned line_len = fix->line_length / bytespp;
66
67 int r = (color >> 16) & 0xff;
68 int g = (color >> 8) & 0xff;
69 int b = (color >> 0) & 0xff;
70
71 if (var->bits_per_pixel == 16) {
72 u16 __iomem *p = (u16 __iomem *)addr;
73 p += y * line_len + x;
74
75 r = r * 32 / 256;
76 g = g * 64 / 256;
77 b = b * 32 / 256;
78
79 __raw_writew((r << 11) | (g << 5) | (b << 0), p);
80 } else if (var->bits_per_pixel == 24) {
81 u8 __iomem *p = (u8 __iomem *)addr;
82 p += (y * line_len + x) * 3;
83
84 __raw_writeb(b, p + 0);
85 __raw_writeb(g, p + 1);
86 __raw_writeb(r, p + 2);
87 } else if (var->bits_per_pixel == 32) {
88 u32 __iomem *p = (u32 __iomem *)addr;
89 p += y * line_len + x;
90 __raw_writel(color, p);
91 }
92}
93
94static void fill_fb(struct fb_info *fbi)
95{
96 struct fb_var_screeninfo *var = &fbi->var;
97 const short w = var->xres_virtual;
98 const short h = var->yres_virtual;
99 void __iomem *addr = fbi->screen_base;
100 int y, x;
101
102 if (!addr)
103 return;
104
105 DBG("fill_fb %dx%d, line_len %d bytes\n", w, h, fbi->fix.line_length);
106
107 for (y = 0; y < h; y++) {
108 for (x = 0; x < w; x++) {
109 if (x < 20 && y < 20)
110 draw_pixel(fbi, x, y, 0xffffff);
111 else if (x < 20 && (y > 20 && y < h - 20))
112 draw_pixel(fbi, x, y, 0xff);
113 else if (y < 20 && (x > 20 && x < w - 20))
114 draw_pixel(fbi, x, y, 0xff00);
115 else if (x > w - 20 && (y > 20 && y < h - 20))
116 draw_pixel(fbi, x, y, 0xff0000);
117 else if (y > h - 20 && (x > 20 && x < w - 20))
118 draw_pixel(fbi, x, y, 0xffff00);
119 else if (x == 20 || x == w - 20 ||
120 y == 20 || y == h - 20)
121 draw_pixel(fbi, x, y, 0xffffff);
122 else if (x == y || w - x == h - y)
123 draw_pixel(fbi, x, y, 0xff00ff);
124 else if (w - x == y || x == h - y)
125 draw_pixel(fbi, x, y, 0x00ffff);
126 else if (x > 20 && y > 20 && x < w - 20 && y < h - 20) {
127 int t = x * 3 / w;
128 unsigned r = 0, g = 0, b = 0;
129 unsigned c;
130 if (var->bits_per_pixel == 16) {
131 if (t == 0)
132 b = (y % 32) * 256 / 32;
133 else if (t == 1)
134 g = (y % 64) * 256 / 64;
135 else if (t == 2)
136 r = (y % 32) * 256 / 32;
137 } else {
138 if (t == 0)
139 b = (y % 256);
140 else if (t == 1)
141 g = (y % 256);
142 else if (t == 2)
143 r = (y % 256);
144 }
145 c = (r << 16) | (g << 8) | (b << 0);
146 draw_pixel(fbi, x, y, c);
147 } else {
148 draw_pixel(fbi, x, y, 0);
149 }
150 }
151 }
152}
153#endif
154
155static unsigned omapfb_get_vrfb_offset(struct omapfb_info *ofbi, int rot)
156{
157 struct vrfb *vrfb = &ofbi->region.vrfb;
158 unsigned offset;
159
160 switch (rot) {
161 case FB_ROTATE_UR:
162 offset = 0;
163 break;
164 case FB_ROTATE_CW:
165 offset = vrfb->yoffset;
166 break;
167 case FB_ROTATE_UD:
168 offset = vrfb->yoffset * OMAP_VRFB_LINE_LEN + vrfb->xoffset;
169 break;
170 case FB_ROTATE_CCW:
171 offset = vrfb->xoffset * OMAP_VRFB_LINE_LEN;
172 break;
173 default:
174 BUG();
175 }
176
177 offset *= vrfb->bytespp;
178
179 return offset;
180}
181
182static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot)
183{
184 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
185 return ofbi->region.vrfb.paddr[rot]
186 + omapfb_get_vrfb_offset(ofbi, rot);
187 } else {
188 return ofbi->region.paddr;
189 }
190}
191
192static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi)
193{
194 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
195 return ofbi->region.vrfb.paddr[0];
196 else
197 return ofbi->region.paddr;
198}
199
200static void __iomem *omapfb_get_region_vaddr(struct omapfb_info *ofbi)
201{
202 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
203 return ofbi->region.vrfb.vaddr[0];
204 else
205 return ofbi->region.vaddr;
206}
207
208static struct omapfb_colormode omapfb_colormodes[] = {
209 {
210 .dssmode = OMAP_DSS_COLOR_UYVY,
211 .bits_per_pixel = 16,
212 .nonstd = OMAPFB_COLOR_YUV422,
213 }, {
214 .dssmode = OMAP_DSS_COLOR_YUV2,
215 .bits_per_pixel = 16,
216 .nonstd = OMAPFB_COLOR_YUY422,
217 }, {
218 .dssmode = OMAP_DSS_COLOR_ARGB16,
219 .bits_per_pixel = 16,
220 .red = { .length = 4, .offset = 8, .msb_right = 0 },
221 .green = { .length = 4, .offset = 4, .msb_right = 0 },
222 .blue = { .length = 4, .offset = 0, .msb_right = 0 },
223 .transp = { .length = 4, .offset = 12, .msb_right = 0 },
224 }, {
225 .dssmode = OMAP_DSS_COLOR_RGB16,
226 .bits_per_pixel = 16,
227 .red = { .length = 5, .offset = 11, .msb_right = 0 },
228 .green = { .length = 6, .offset = 5, .msb_right = 0 },
229 .blue = { .length = 5, .offset = 0, .msb_right = 0 },
230 .transp = { .length = 0, .offset = 0, .msb_right = 0 },
231 }, {
232 .dssmode = OMAP_DSS_COLOR_RGB24P,
233 .bits_per_pixel = 24,
234 .red = { .length = 8, .offset = 16, .msb_right = 0 },
235 .green = { .length = 8, .offset = 8, .msb_right = 0 },
236 .blue = { .length = 8, .offset = 0, .msb_right = 0 },
237 .transp = { .length = 0, .offset = 0, .msb_right = 0 },
238 }, {
239 .dssmode = OMAP_DSS_COLOR_RGB24U,
240 .bits_per_pixel = 32,
241 .red = { .length = 8, .offset = 16, .msb_right = 0 },
242 .green = { .length = 8, .offset = 8, .msb_right = 0 },
243 .blue = { .length = 8, .offset = 0, .msb_right = 0 },
244 .transp = { .length = 0, .offset = 0, .msb_right = 0 },
245 }, {
246 .dssmode = OMAP_DSS_COLOR_ARGB32,
247 .bits_per_pixel = 32,
248 .red = { .length = 8, .offset = 16, .msb_right = 0 },
249 .green = { .length = 8, .offset = 8, .msb_right = 0 },
250 .blue = { .length = 8, .offset = 0, .msb_right = 0 },
251 .transp = { .length = 8, .offset = 24, .msb_right = 0 },
252 }, {
253 .dssmode = OMAP_DSS_COLOR_RGBA32,
254 .bits_per_pixel = 32,
255 .red = { .length = 8, .offset = 24, .msb_right = 0 },
256 .green = { .length = 8, .offset = 16, .msb_right = 0 },
257 .blue = { .length = 8, .offset = 8, .msb_right = 0 },
258 .transp = { .length = 8, .offset = 0, .msb_right = 0 },
259 }, {
260 .dssmode = OMAP_DSS_COLOR_RGBX32,
261 .bits_per_pixel = 32,
262 .red = { .length = 8, .offset = 24, .msb_right = 0 },
263 .green = { .length = 8, .offset = 16, .msb_right = 0 },
264 .blue = { .length = 8, .offset = 8, .msb_right = 0 },
265 .transp = { .length = 0, .offset = 0, .msb_right = 0 },
266 },
267};
268
269static bool cmp_var_to_colormode(struct fb_var_screeninfo *var,
270 struct omapfb_colormode *color)
271{
272 bool cmp_component(struct fb_bitfield *f1, struct fb_bitfield *f2)
273 {
274 return f1->length == f2->length &&
275 f1->offset == f2->offset &&
276 f1->msb_right == f2->msb_right;
277 }
278
279 if (var->bits_per_pixel == 0 ||
280 var->red.length == 0 ||
281 var->blue.length == 0 ||
282 var->green.length == 0)
283 return 0;
284
285 return var->bits_per_pixel == color->bits_per_pixel &&
286 cmp_component(&var->red, &color->red) &&
287 cmp_component(&var->green, &color->green) &&
288 cmp_component(&var->blue, &color->blue) &&
289 cmp_component(&var->transp, &color->transp);
290}
291
292static void assign_colormode_to_var(struct fb_var_screeninfo *var,
293 struct omapfb_colormode *color)
294{
295 var->bits_per_pixel = color->bits_per_pixel;
296 var->nonstd = color->nonstd;
297 var->red = color->red;
298 var->green = color->green;
299 var->blue = color->blue;
300 var->transp = color->transp;
301}
302
303static int fb_mode_to_dss_mode(struct fb_var_screeninfo *var,
304 enum omap_color_mode *mode)
305{
306 enum omap_color_mode dssmode;
307 int i;
308
309 /* first match with nonstd field */
310 if (var->nonstd) {
311 for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
312 struct omapfb_colormode *m = &omapfb_colormodes[i];
313 if (var->nonstd == m->nonstd) {
314 assign_colormode_to_var(var, m);
315 *mode = m->dssmode;
316 return 0;
317 }
318 }
319
320 return -EINVAL;
321 }
322
323 /* then try exact match of bpp and colors */
324 for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
325 struct omapfb_colormode *m = &omapfb_colormodes[i];
326 if (cmp_var_to_colormode(var, m)) {
327 assign_colormode_to_var(var, m);
328 *mode = m->dssmode;
329 return 0;
330 }
331 }
332
333	/* match by bpp if the user has not filled in the
334	 * color fields properly */
335 switch (var->bits_per_pixel) {
336 case 1:
337 dssmode = OMAP_DSS_COLOR_CLUT1;
338 break;
339 case 2:
340 dssmode = OMAP_DSS_COLOR_CLUT2;
341 break;
342 case 4:
343 dssmode = OMAP_DSS_COLOR_CLUT4;
344 break;
345 case 8:
346 dssmode = OMAP_DSS_COLOR_CLUT8;
347 break;
348 case 12:
349 dssmode = OMAP_DSS_COLOR_RGB12U;
350 break;
351 case 16:
352 dssmode = OMAP_DSS_COLOR_RGB16;
353 break;
354 case 24:
355 dssmode = OMAP_DSS_COLOR_RGB24P;
356 break;
357 case 32:
358 dssmode = OMAP_DSS_COLOR_RGB24U;
359 break;
360 default:
361 return -EINVAL;
362 }
363
364 for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
365 struct omapfb_colormode *m = &omapfb_colormodes[i];
366 if (dssmode == m->dssmode) {
367 assign_colormode_to_var(var, m);
368 *mode = m->dssmode;
369 return 0;
370 }
371 }
372
373 return -EINVAL;
374}
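/*
 * A minimal sketch of a var that takes the "exact match" path above and
 * resolves to OMAP_DSS_COLOR_RGB16; the field values mirror the RGB16
 * entry in omapfb_colormodes[]. The helper name is hypothetical.
 */
static void example_fill_rgb565_var(struct fb_var_screeninfo *var)
{
	memset(var, 0, sizeof(*var));
	var->bits_per_pixel = 16;
	var->red.length = 5;	var->red.offset = 11;
	var->green.length = 6;	var->green.offset = 5;
	var->blue.length = 5;	var->blue.offset = 0;
	/* nonstd stays 0, so the nonstd branch is skipped */
}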
375
376static int check_fb_res_bounds(struct fb_var_screeninfo *var)
377{
378 int xres_min = OMAPFB_PLANE_XRES_MIN;
379 int xres_max = 2048;
380 int yres_min = OMAPFB_PLANE_YRES_MIN;
381 int yres_max = 2048;
382
383 /* XXX: some applications seem to set virtual res to 0. */
384 if (var->xres_virtual == 0)
385 var->xres_virtual = var->xres;
386
387 if (var->yres_virtual == 0)
388 var->yres_virtual = var->yres;
389
390 if (var->xres_virtual < xres_min || var->yres_virtual < yres_min)
391 return -EINVAL;
392
393 if (var->xres < xres_min)
394 var->xres = xres_min;
395 if (var->yres < yres_min)
396 var->yres = yres_min;
397 if (var->xres > xres_max)
398 var->xres = xres_max;
399 if (var->yres > yres_max)
400 var->yres = yres_max;
401
402 if (var->xres > var->xres_virtual)
403 var->xres = var->xres_virtual;
404 if (var->yres > var->yres_virtual)
405 var->yres = var->yres_virtual;
406
407 return 0;
408}
409
410static void shrink_height(unsigned long max_frame_size,
411 struct fb_var_screeninfo *var)
412{
413 DBG("can't fit FB into memory, reducing y\n");
414 var->yres_virtual = max_frame_size /
415 (var->xres_virtual * var->bits_per_pixel >> 3);
416
417 if (var->yres_virtual < OMAPFB_PLANE_YRES_MIN)
418 var->yres_virtual = OMAPFB_PLANE_YRES_MIN;
419
420 if (var->yres > var->yres_virtual)
421 var->yres = var->yres_virtual;
422}
423
424static void shrink_width(unsigned long max_frame_size,
425 struct fb_var_screeninfo *var)
426{
427 DBG("can't fit FB into memory, reducing x\n");
428 var->xres_virtual = max_frame_size / var->yres_virtual /
429 (var->bits_per_pixel >> 3);
430
431 if (var->xres_virtual < OMAPFB_PLANE_XRES_MIN)
432 var->xres_virtual = OMAPFB_PLANE_XRES_MIN;
433
434 if (var->xres > var->xres_virtual)
435 var->xres = var->xres_virtual;
436}
437
438static int check_vrfb_fb_size(unsigned long region_size,
439 const struct fb_var_screeninfo *var)
440{
441 unsigned long min_phys_size = omap_vrfb_min_phys_size(var->xres_virtual,
442 var->yres_virtual, var->bits_per_pixel >> 3);
443
444 return min_phys_size > region_size ? -EINVAL : 0;
445}
446
447static int check_fb_size(const struct omapfb_info *ofbi,
448 struct fb_var_screeninfo *var)
449{
450 unsigned long max_frame_size = ofbi->region.size;
451 int bytespp = var->bits_per_pixel >> 3;
452 unsigned long line_size = var->xres_virtual * bytespp;
453
454 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
455 /* One needs to check for both VRFB and OMAPFB limitations. */
456 if (check_vrfb_fb_size(max_frame_size, var))
457 shrink_height(omap_vrfb_max_height(
458 max_frame_size, var->xres_virtual, bytespp) *
459 line_size, var);
460
461 if (check_vrfb_fb_size(max_frame_size, var)) {
462 DBG("cannot fit FB to memory\n");
463 return -EINVAL;
464 }
465
466 return 0;
467 }
468
469 DBG("max frame size %lu, line size %lu\n", max_frame_size, line_size);
470
471 if (line_size * var->yres_virtual > max_frame_size)
472 shrink_height(max_frame_size, var);
473
474 if (line_size * var->yres_virtual > max_frame_size) {
475 shrink_width(max_frame_size, var);
476 line_size = var->xres_virtual * bytespp;
477 }
478
479 if (line_size * var->yres_virtual > max_frame_size) {
480 DBG("cannot fit FB to memory\n");
481 return -EINVAL;
482 }
483
484 return 0;
485}
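/*
 * A worked example of the shrink logic above, with a hypothetical 1 MiB
 * region and an 800x480 virtual resolution at 32bpp:
 *
 *   line_size = 800 * 4         = 3200 bytes
 *   frame     = 3200 * 480      = 1536000 bytes  > 1048576, too big
 *   shrink_height: yres_virtual = 1048576 / 3200 = 327 lines
 *
 * so the frame becomes 3200 * 327 = 1046400 bytes and now fits.
 */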
486
487/*
488 * Consider if VRFB assisted rotation is in use and if the virtual space for
489 * the zero degree view needs to be mapped. The need for mapping also acts as
490 * the trigger for setting up the hardware on the context in question. This
491 * ensures that one does not attempt to access the virtual view before the
492 * hardware is serving the address translations.
493 */
494static int setup_vrfb_rotation(struct fb_info *fbi)
495{
496 struct omapfb_info *ofbi = FB2OFB(fbi);
497 struct omapfb2_mem_region *rg = &ofbi->region;
498 struct vrfb *vrfb = &rg->vrfb;
499 struct fb_var_screeninfo *var = &fbi->var;
500 struct fb_fix_screeninfo *fix = &fbi->fix;
501 unsigned bytespp;
502 bool yuv_mode;
503 enum omap_color_mode mode;
504 int r;
505 bool reconf;
506
507 if (!rg->size || ofbi->rotation_type != OMAP_DSS_ROT_VRFB)
508 return 0;
509
510 DBG("setup_vrfb_rotation\n");
511
512 r = fb_mode_to_dss_mode(var, &mode);
513 if (r)
514 return r;
515
516 bytespp = var->bits_per_pixel >> 3;
517
518 yuv_mode = mode == OMAP_DSS_COLOR_YUV2 || mode == OMAP_DSS_COLOR_UYVY;
519
520 /* We need to reconfigure VRFB if the resolution changes, if yuv mode
521 * is enabled/disabled, or if bytes per pixel changes */
522
523 /* XXX we shouldn't allow this when framebuffer is mmapped */
524
525 reconf = false;
526
527 if (yuv_mode != vrfb->yuv_mode)
528 reconf = true;
529 else if (bytespp != vrfb->bytespp)
530 reconf = true;
531 else if (vrfb->xres != var->xres_virtual ||
532 vrfb->yres != var->yres_virtual)
533 reconf = true;
534
535 if (vrfb->vaddr[0] && reconf) {
536 fbi->screen_base = NULL;
537 fix->smem_start = 0;
538 fix->smem_len = 0;
539 iounmap(vrfb->vaddr[0]);
540 vrfb->vaddr[0] = NULL;
541 DBG("setup_vrfb_rotation: reset fb\n");
542 }
543
544 if (vrfb->vaddr[0])
545 return 0;
546
547 omap_vrfb_setup(&rg->vrfb, rg->paddr,
548 var->xres_virtual,
549 var->yres_virtual,
550 bytespp, yuv_mode);
551
552 /* Now one can ioremap the 0 angle view */
553 r = omap_vrfb_map_angle(vrfb, var->yres_virtual, 0);
554 if (r)
555 return r;
556
557 /* used by open/write in fbmem.c */
558 fbi->screen_base = ofbi->region.vrfb.vaddr[0];
559
560 fix->smem_start = ofbi->region.vrfb.paddr[0];
561
562 switch (var->nonstd) {
563 case OMAPFB_COLOR_YUV422:
564 case OMAPFB_COLOR_YUY422:
565 fix->line_length =
566 (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2;
567 break;
568 default:
569 fix->line_length =
570 (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3;
571 break;
572 }
573
574 fix->smem_len = var->yres_virtual * fix->line_length;
575
576 return 0;
577}
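/*
 * A worked example of the line_length arithmetic above, assuming the usual
 * OMAP_VRFB_LINE_LEN of 2048: a 16bpp YUV422 mode gets
 * (2048 * 16) >> 2 = 8192 bytes per line, twice the RGB16 value of
 * (2048 * 16) >> 3 = 4096, since VRFB stores a YUV422 pixel pair as a
 * single 32-bit element.
 */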
578
579int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
580 struct fb_var_screeninfo *var)
581{
582 int i;
583
584 for (i = 0; i < ARRAY_SIZE(omapfb_colormodes); ++i) {
585 struct omapfb_colormode *mode = &omapfb_colormodes[i];
586 if (dssmode == mode->dssmode) {
587 assign_colormode_to_var(var, mode);
588 return 0;
589 }
590 }
591 return -ENOENT;
592}
593
594void set_fb_fix(struct fb_info *fbi)
595{
596 struct fb_fix_screeninfo *fix = &fbi->fix;
597 struct fb_var_screeninfo *var = &fbi->var;
598 struct omapfb_info *ofbi = FB2OFB(fbi);
599 struct omapfb2_mem_region *rg = &ofbi->region;
600
601 DBG("set_fb_fix\n");
602
603 /* used by open/write in fbmem.c */
604 fbi->screen_base = (char __iomem *)omapfb_get_region_vaddr(ofbi);
605
606 /* used by mmap in fbmem.c */
607 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
608 switch (var->nonstd) {
609 case OMAPFB_COLOR_YUV422:
610 case OMAPFB_COLOR_YUY422:
611 fix->line_length =
612 (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 2;
613 break;
614 default:
615 fix->line_length =
616 (OMAP_VRFB_LINE_LEN * var->bits_per_pixel) >> 3;
617 break;
618 }
619
620 fix->smem_len = var->yres_virtual * fix->line_length;
621 } else {
622 fix->line_length =
623 (var->xres_virtual * var->bits_per_pixel) >> 3;
624 fix->smem_len = rg->size;
625 }
626
627 fix->smem_start = omapfb_get_region_paddr(ofbi);
628
629 fix->type = FB_TYPE_PACKED_PIXELS;
630
631 if (var->nonstd)
632 fix->visual = FB_VISUAL_PSEUDOCOLOR;
633 else {
634 switch (var->bits_per_pixel) {
635 case 32:
636 case 24:
637 case 16:
638 case 12:
639 fix->visual = FB_VISUAL_TRUECOLOR;
640 /* 12bpp is stored in 16 bits */
641 break;
642 case 1:
643 case 2:
644 case 4:
645 case 8:
646 fix->visual = FB_VISUAL_PSEUDOCOLOR;
647 break;
648 }
649 }
650
651 fix->accel = FB_ACCEL_NONE;
652
653 fix->xpanstep = 1;
654 fix->ypanstep = 1;
655}
656
657/* check new var and possibly modify it to be ok */
658int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
659{
660 struct omapfb_info *ofbi = FB2OFB(fbi);
661 struct omap_dss_device *display = fb2display(fbi);
662 enum omap_color_mode mode = 0;
663 int i;
664 int r;
665
666 DBG("check_fb_var %d\n", ofbi->id);
667
668 if (ofbi->region.size == 0)
669 return 0;
670
671 r = fb_mode_to_dss_mode(var, &mode);
672 if (r) {
673 DBG("cannot convert var to omap dss mode\n");
674 return r;
675 }
676
677 for (i = 0; i < ofbi->num_overlays; ++i) {
678 if ((ofbi->overlays[i]->supported_modes & mode) == 0) {
679 DBG("invalid mode\n");
680 return -EINVAL;
681 }
682 }
683
684 if (var->rotate < 0 || var->rotate > 3)
685 return -EINVAL;
686
687 if (check_fb_res_bounds(var))
688 return -EINVAL;
689
690 if (check_fb_size(ofbi, var))
691 return -EINVAL;
692
693 if (var->xres + var->xoffset > var->xres_virtual)
694 var->xoffset = var->xres_virtual - var->xres;
695 if (var->yres + var->yoffset > var->yres_virtual)
696 var->yoffset = var->yres_virtual - var->yres;
697
698 DBG("xres = %d, yres = %d, vxres = %d, vyres = %d\n",
699 var->xres, var->yres,
700 var->xres_virtual, var->yres_virtual);
701
702 var->height = -1;
703 var->width = -1;
704 var->grayscale = 0;
705
706 if (display && display->get_timings) {
707 struct omap_video_timings timings;
708 display->get_timings(display, &timings);
709
710		/* pixclock is in ps; the other timings are in pixels/lines */
711 var->pixclock = timings.pixel_clock != 0 ?
712 KHZ2PICOS(timings.pixel_clock) :
713 0;
714 var->left_margin = timings.hfp;
715 var->right_margin = timings.hbp;
716 var->upper_margin = timings.vfp;
717 var->lower_margin = timings.vbp;
718 var->hsync_len = timings.hsw;
719 var->vsync_len = timings.vsw;
720 } else {
721 var->pixclock = 0;
722 var->left_margin = 0;
723 var->right_margin = 0;
724 var->upper_margin = 0;
725 var->lower_margin = 0;
726 var->hsync_len = 0;
727 var->vsync_len = 0;
728 }
729
730 /* TODO: get these from panel->config */
731 var->vmode = FB_VMODE_NONINTERLACED;
732 var->sync = 0;
733
734 return 0;
735}
736
737/*
738 * ---------------------------------------------------------------------------
739 * fbdev framework callbacks
740 * ---------------------------------------------------------------------------
741 */
742static int omapfb_open(struct fb_info *fbi, int user)
743{
744 return 0;
745}
746
747static int omapfb_release(struct fb_info *fbi, int user)
748{
749#if 0
750 struct omapfb_info *ofbi = FB2OFB(fbi);
751 struct omapfb2_device *fbdev = ofbi->fbdev;
752 struct omap_dss_device *display = fb2display(fbi);
753
754 DBG("Closing fb with plane index %d\n", ofbi->id);
755
756 omapfb_lock(fbdev);
757
758 if (display && display->get_update_mode && display->update) {
759 /* XXX this update should be removed, I think. But it's
760 * good for debugging */
761 if (display->get_update_mode(display) ==
762 OMAP_DSS_UPDATE_MANUAL) {
763 u16 w, h;
764
765 if (display->sync)
766 display->sync(display);
767
768 display->get_resolution(display, &w, &h);
769 display->update(display, 0, 0, w, h);
770 }
771 }
772
773 if (display && display->sync)
774 display->sync(display);
775
776 omapfb_unlock(fbdev);
777#endif
778 return 0;
779}
780
781static unsigned calc_rotation_offset_dma(struct fb_var_screeninfo *var,
782 struct fb_fix_screeninfo *fix, int rotation)
783{
784 unsigned offset;
785
786 offset = var->yoffset * fix->line_length +
787 var->xoffset * (var->bits_per_pixel >> 3);
788
789 return offset;
790}
791
792static unsigned calc_rotation_offset_vrfb(struct fb_var_screeninfo *var,
793 struct fb_fix_screeninfo *fix, int rotation)
794{
795 unsigned offset;
796
797 if (rotation == FB_ROTATE_UD)
798 offset = (var->yres_virtual - var->yres) *
799 fix->line_length;
800 else if (rotation == FB_ROTATE_CW)
801 offset = (var->yres_virtual - var->yres) *
802 (var->bits_per_pixel >> 3);
803 else
804 offset = 0;
805
806 if (rotation == FB_ROTATE_UR)
807 offset += var->yoffset * fix->line_length +
808 var->xoffset * (var->bits_per_pixel >> 3);
809 else if (rotation == FB_ROTATE_UD)
810 offset -= var->yoffset * fix->line_length +
811 var->xoffset * (var->bits_per_pixel >> 3);
812 else if (rotation == FB_ROTATE_CW)
813 offset -= var->xoffset * fix->line_length +
814 var->yoffset * (var->bits_per_pixel >> 3);
815 else if (rotation == FB_ROTATE_CCW)
816 offset += var->xoffset * fix->line_length +
817 var->yoffset * (var->bits_per_pixel >> 3);
818
819 return offset;
820}
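/*
 * A worked example of the FB_ROTATE_UD branch above, with hypothetical
 * values yres_virtual = 512, yres = 480, line_length = 4096 and no
 * panning: offset = (512 - 480) * 4096 = 131072 bytes, skipping the 32
 * unused lines at the start of the rotated virtual view.
 */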
821
822
823/* setup overlay according to the fb */
824static int omapfb_setup_overlay(struct fb_info *fbi, struct omap_overlay *ovl,
825 u16 posx, u16 posy, u16 outw, u16 outh)
826{
827 int r = 0;
828 struct omapfb_info *ofbi = FB2OFB(fbi);
829 struct fb_var_screeninfo *var = &fbi->var;
830 struct fb_fix_screeninfo *fix = &fbi->fix;
831 enum omap_color_mode mode = 0;
832 int offset;
833 u32 data_start_p;
834 void __iomem *data_start_v;
835 struct omap_overlay_info info;
836 int xres, yres;
837 int screen_width;
838 int mirror;
839 int rotation = var->rotate;
840 int i;
841
842 for (i = 0; i < ofbi->num_overlays; i++) {
843 if (ovl != ofbi->overlays[i])
844 continue;
845
846 rotation = (rotation + ofbi->rotation[i]) % 4;
847 break;
848 }
849
850 DBG("setup_overlay %d, posx %d, posy %d, outw %d, outh %d\n", ofbi->id,
851 posx, posy, outw, outh);
852
853 if (rotation == FB_ROTATE_CW || rotation == FB_ROTATE_CCW) {
854 xres = var->yres;
855 yres = var->xres;
856 } else {
857 xres = var->xres;
858 yres = var->yres;
859 }
860
861
862 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
863 data_start_p = omapfb_get_region_rot_paddr(ofbi, rotation);
864 data_start_v = NULL;
865 } else {
866 data_start_p = omapfb_get_region_paddr(ofbi);
867 data_start_v = omapfb_get_region_vaddr(ofbi);
868 }
869
870 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
871 offset = calc_rotation_offset_vrfb(var, fix, rotation);
872 else
873 offset = calc_rotation_offset_dma(var, fix, rotation);
874
875 data_start_p += offset;
876 data_start_v += offset;
877
878 if (offset)
879 DBG("offset %d, %d = %d\n",
880 var->xoffset, var->yoffset, offset);
881
882 DBG("paddr %x, vaddr %p\n", data_start_p, data_start_v);
883
884 r = fb_mode_to_dss_mode(var, &mode);
885 if (r) {
886		DBG("fb_mode_to_dss_mode failed\n");
887 goto err;
888 }
889
890 switch (var->nonstd) {
891 case OMAPFB_COLOR_YUV422:
892 case OMAPFB_COLOR_YUY422:
893 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
894 screen_width = fix->line_length
895 / (var->bits_per_pixel >> 2);
896 break;
897 }
898 default:
899 screen_width = fix->line_length / (var->bits_per_pixel >> 3);
900 break;
901 }
902
903 ovl->get_overlay_info(ovl, &info);
904
905 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
906 mirror = 0;
907 else
908 mirror = ofbi->mirror;
909
910 info.paddr = data_start_p;
911 info.vaddr = data_start_v;
912 info.screen_width = screen_width;
913 info.width = xres;
914 info.height = yres;
915 info.color_mode = mode;
916 info.rotation_type = ofbi->rotation_type;
917 info.rotation = rotation;
918 info.mirror = mirror;
919
920 info.pos_x = posx;
921 info.pos_y = posy;
922 info.out_width = outw;
923 info.out_height = outh;
924
925 r = ovl->set_overlay_info(ovl, &info);
926 if (r) {
927		DBG("ovl->set_overlay_info failed\n");
928 goto err;
929 }
930
931 return 0;
932
933err:
934 DBG("setup_overlay failed\n");
935 return r;
936}
937
938/* apply var to the overlay */
939int omapfb_apply_changes(struct fb_info *fbi, int init)
940{
941 int r = 0;
942 struct omapfb_info *ofbi = FB2OFB(fbi);
943 struct fb_var_screeninfo *var = &fbi->var;
944 struct omap_overlay *ovl;
945 u16 posx, posy;
946 u16 outw, outh;
947 int i;
948
949#ifdef DEBUG
950 if (omapfb_test_pattern)
951 fill_fb(fbi);
952#endif
953
954 for (i = 0; i < ofbi->num_overlays; i++) {
955 ovl = ofbi->overlays[i];
956
957 DBG("apply_changes, fb %d, ovl %d\n", ofbi->id, ovl->id);
958
959 if (ofbi->region.size == 0) {
960 /* the fb is not available. disable the overlay */
961 omapfb_overlay_enable(ovl, 0);
962 if (!init && ovl->manager)
963 ovl->manager->apply(ovl->manager);
964 continue;
965 }
966
967 if (init || (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
968 int rotation = (var->rotate + ofbi->rotation[i]) % 4;
969 if (rotation == FB_ROTATE_CW ||
970 rotation == FB_ROTATE_CCW) {
971 outw = var->yres;
972 outh = var->xres;
973 } else {
974 outw = var->xres;
975 outh = var->yres;
976 }
977 } else {
978 outw = ovl->info.out_width;
979 outh = ovl->info.out_height;
980 }
981
982 if (init) {
983 posx = 0;
984 posy = 0;
985 } else {
986 posx = ovl->info.pos_x;
987 posy = ovl->info.pos_y;
988 }
989
990 r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh);
991 if (r)
992 goto err;
993
994 if (!init && ovl->manager)
995 ovl->manager->apply(ovl->manager);
996 }
997 return 0;
998err:
999 DBG("apply_changes failed\n");
1000 return r;
1001}
1002
1003/* check var and possibly tweak it to something supported,
1004 * DO NOT MODIFY PAR */
1005static int omapfb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi)
1006{
1007 int r;
1008
1009 DBG("check_var(%d)\n", FB2OFB(fbi)->id);
1010
1011 r = check_fb_var(fbi, var);
1012
1013 return r;
1014}
1015
1016/* set the video mode according to info->var */
1017static int omapfb_set_par(struct fb_info *fbi)
1018{
1019 int r;
1020
1021 DBG("set_par(%d)\n", FB2OFB(fbi)->id);
1022
1023 set_fb_fix(fbi);
1024
1025 r = setup_vrfb_rotation(fbi);
1026 if (r)
1027 return r;
1028
1029 r = omapfb_apply_changes(fbi, 0);
1030
1031 return r;
1032}
1033
1034static int omapfb_pan_display(struct fb_var_screeninfo *var,
1035 struct fb_info *fbi)
1036{
1037 struct fb_var_screeninfo new_var;
1038 int r;
1039
1040 DBG("pan_display(%d)\n", FB2OFB(fbi)->id);
1041
1042 if (var->xoffset == fbi->var.xoffset &&
1043 var->yoffset == fbi->var.yoffset)
1044 return 0;
1045
1046 new_var = fbi->var;
1047 new_var.xoffset = var->xoffset;
1048 new_var.yoffset = var->yoffset;
1049
1050 fbi->var = new_var;
1051
1052 r = omapfb_apply_changes(fbi, 0);
1053
1054 return r;
1055}
1056
1057static void mmap_user_open(struct vm_area_struct *vma)
1058{
1059 struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
1060
1061 atomic_inc(&ofbi->map_count);
1062}
1063
1064static void mmap_user_close(struct vm_area_struct *vma)
1065{
1066 struct omapfb_info *ofbi = (struct omapfb_info *)vma->vm_private_data;
1067
1068 atomic_dec(&ofbi->map_count);
1069}
1070
1071static struct vm_operations_struct mmap_user_ops = {
1072 .open = mmap_user_open,
1073 .close = mmap_user_close,
1074};
1075
1076static int omapfb_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
1077{
1078 struct omapfb_info *ofbi = FB2OFB(fbi);
1079 struct fb_fix_screeninfo *fix = &fbi->fix;
1080 unsigned long off;
1081 unsigned long start;
1082 u32 len;
1083
1084 if (vma->vm_end - vma->vm_start == 0)
1085 return 0;
1086 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
1087 return -EINVAL;
1088 off = vma->vm_pgoff << PAGE_SHIFT;
1089
1090 start = omapfb_get_region_paddr(ofbi);
1091 len = fix->smem_len;
1092 if (off >= len)
1093 return -EINVAL;
1094 if ((vma->vm_end - vma->vm_start + off) > len)
1095 return -EINVAL;
1096
1097 off += start;
1098
1099 DBG("user mmap region start %lx, len %d, off %lx\n", start, len, off);
1100
1101 vma->vm_pgoff = off >> PAGE_SHIFT;
1102 vma->vm_flags |= VM_IO | VM_RESERVED;
1103 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
1104 vma->vm_ops = &mmap_user_ops;
1105 vma->vm_private_data = ofbi;
1106 if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
1107 vma->vm_end - vma->vm_start, vma->vm_page_prot))
1108 return -EAGAIN;
1109 /* vm_ops.open won't be called for mmap itself. */
1110 atomic_inc(&ofbi->map_count);
1111 return 0;
1112}
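
/*
 * Illustrative userspace sketch (not part of the driver): mapping the
 * framebuffer through the standard fbdev interface ends up in
 * omapfb_mmap() above. The device path is an assumption.
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	struct fb_fix_screeninfo fix;
 *	ioctl(fd, FBIOGET_FSCREENINFO, &fix);
 *	void *fb = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, 0);
 *
 * An offset of 0 maps from the start of the region; offsets are checked
 * against fix->smem_len above, so a mapping can never extend past the
 * VRAM allocation.
 */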
1113
1114/* Store a single color palette entry into a pseudo palette or the hardware
1115 * palette if one is available. For now we support only 16bpp and thus store
1116 * the entry only to the pseudo palette.
1117 */
1118static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
1119 u_int blue, u_int transp, int update_hw_pal)
1120{
1121 /*struct omapfb_info *ofbi = FB2OFB(fbi);*/
1122 /*struct omapfb2_device *fbdev = ofbi->fbdev;*/
1123 struct fb_var_screeninfo *var = &fbi->var;
1124 int r = 0;
1125
1126 enum omapfb_color_format mode = OMAPFB_COLOR_RGB24U; /* XXX */
1127
1128 /*switch (plane->color_mode) {*/
1129 switch (mode) {
1130 case OMAPFB_COLOR_YUV422:
1131 case OMAPFB_COLOR_YUV420:
1132 case OMAPFB_COLOR_YUY422:
1133 r = -EINVAL;
1134 break;
1135 case OMAPFB_COLOR_CLUT_8BPP:
1136 case OMAPFB_COLOR_CLUT_4BPP:
1137 case OMAPFB_COLOR_CLUT_2BPP:
1138 case OMAPFB_COLOR_CLUT_1BPP:
1139 /*
1140 if (fbdev->ctrl->setcolreg)
1141 r = fbdev->ctrl->setcolreg(regno, red, green, blue,
1142 transp, update_hw_pal);
1143 */
1144		/* CLUT modes are not supported yet */
1145		r = -EINVAL;
1146 break;
1147 case OMAPFB_COLOR_RGB565:
1148 case OMAPFB_COLOR_RGB444:
1149 case OMAPFB_COLOR_RGB24P:
1150 case OMAPFB_COLOR_RGB24U:
1151 if (r != 0)
1152 break;
1153
1154		/*
1155		 * regno is unsigned, so a negative-value check is
1156		 * unnecessary here.
1157		 */
1158
1159 if (regno < 16) {
1160 u16 pal;
1161 pal = ((red >> (16 - var->red.length)) <<
1162 var->red.offset) |
1163 ((green >> (16 - var->green.length)) <<
1164 var->green.offset) |
1165 (blue >> (16 - var->blue.length));
1166 ((u32 *)(fbi->pseudo_palette))[regno] = pal;
1167 }
1168 break;
1169 default:
1170 BUG();
1171 }
1172 return r;
1173}
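
/*
 * Worked example of the packing above, assuming RGB565 (red 5 bits at
 * offset 11, green 6 at 5, blue 5 at 0): a full-intensity red entry
 * (red = 0xffff) gives (0xffff >> (16 - 5)) << 11 = 0x1f << 11 = 0xf800,
 * i.e. pure red in the 16-bit pseudo palette. Blue is not shifted at
 * all, which is correct only while blue.offset is 0, as it is for the
 * RGB modes handled here.
 */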
1174
1175static int omapfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
1176 u_int transp, struct fb_info *info)
1177{
1178 DBG("setcolreg\n");
1179
1180 return _setcolreg(info, regno, red, green, blue, transp, 1);
1181}
1182
1183static int omapfb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
1184{
1185 int count, index, r;
1186 u16 *red, *green, *blue, *transp;
1187 u16 trans = 0xffff;
1188
1189 DBG("setcmap\n");
1190
1191 red = cmap->red;
1192 green = cmap->green;
1193 blue = cmap->blue;
1194 transp = cmap->transp;
1195 index = cmap->start;
1196
1197 for (count = 0; count < cmap->len; count++) {
1198 if (transp)
1199 trans = *transp++;
1200 r = _setcolreg(info, index++, *red++, *green++, *blue++, trans,
1201 count == cmap->len - 1);
1202 if (r != 0)
1203 return r;
1204 }
1205
1206 return 0;
1207}
1208
1209static int omapfb_blank(int blank, struct fb_info *fbi)
1210{
1211 struct omapfb_info *ofbi = FB2OFB(fbi);
1212 struct omapfb2_device *fbdev = ofbi->fbdev;
1213 struct omap_dss_device *display = fb2display(fbi);
1214 int do_update = 0;
1215 int r = 0;
1216
1217 omapfb_lock(fbdev);
1218
1219 switch (blank) {
1220 case FB_BLANK_UNBLANK:
1221 if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
1222 goto exit;
1223
1224 if (display->resume)
1225 r = display->resume(display);
1226
1227 if (r == 0 && display->get_update_mode &&
1228 display->get_update_mode(display) ==
1229 OMAP_DSS_UPDATE_MANUAL)
1230 do_update = 1;
1231
1232 break;
1233
1234 case FB_BLANK_NORMAL:
1235 /* FB_BLANK_NORMAL could be implemented.
1236 * Needs DSS additions. */
1237 case FB_BLANK_VSYNC_SUSPEND:
1238 case FB_BLANK_HSYNC_SUSPEND:
1239 case FB_BLANK_POWERDOWN:
1240 if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
1241 goto exit;
1242
1243 if (display->suspend)
1244 r = display->suspend(display);
1245
1246 break;
1247
1248 default:
1249 r = -EINVAL;
1250 }
1251
1252exit:
1253 omapfb_unlock(fbdev);
1254
1255 if (r == 0 && do_update && display->update) {
1256 u16 w, h;
1257 display->get_resolution(display, &w, &h);
1258
1259 r = display->update(display, 0, 0, w, h);
1260 }
1261
1262 return r;
1263}
1264
1265#if 0
1266/* XXX fb_read and fb_write are needed for VRFB */
1267ssize_t omapfb_write(struct fb_info *info, const char __user *buf,
1268 size_t count, loff_t *ppos)
1269{
1270 DBG("omapfb_write %d, %lu\n", count, (unsigned long)*ppos);
1271 /* XXX needed for VRFB */
1272 return count;
1273}
1274#endif
1275
1276static struct fb_ops omapfb_ops = {
1277 .owner = THIS_MODULE,
1278 .fb_open = omapfb_open,
1279 .fb_release = omapfb_release,
1280 .fb_fillrect = cfb_fillrect,
1281 .fb_copyarea = cfb_copyarea,
1282 .fb_imageblit = cfb_imageblit,
1283 .fb_blank = omapfb_blank,
1284 .fb_ioctl = omapfb_ioctl,
1285 .fb_check_var = omapfb_check_var,
1286 .fb_set_par = omapfb_set_par,
1287 .fb_pan_display = omapfb_pan_display,
1288 .fb_mmap = omapfb_mmap,
1289 .fb_setcolreg = omapfb_setcolreg,
1290 .fb_setcmap = omapfb_setcmap,
1291 /*.fb_write = omapfb_write,*/
1292};
1293
1294static void omapfb_free_fbmem(struct fb_info *fbi)
1295{
1296 struct omapfb_info *ofbi = FB2OFB(fbi);
1297 struct omapfb2_device *fbdev = ofbi->fbdev;
1298 struct omapfb2_mem_region *rg;
1299
1300 rg = &ofbi->region;
1301
1302 if (rg->paddr)
1303 if (omap_vram_free(rg->paddr, rg->size))
1304 dev_err(fbdev->dev, "VRAM FREE failed\n");
1305
1306 if (rg->vaddr)
1307 iounmap(rg->vaddr);
1308
1309 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
1310 /* unmap the 0 angle rotation */
1311 if (rg->vrfb.vaddr[0]) {
1312 iounmap(rg->vrfb.vaddr[0]);
1313 omap_vrfb_release_ctx(&rg->vrfb);
1314 }
1315 }
1316
1317 rg->vaddr = NULL;
1318 rg->paddr = 0;
1319 rg->alloc = 0;
1320 rg->size = 0;
1321}
1322
1323static void clear_fb_info(struct fb_info *fbi)
1324{
1325 memset(&fbi->var, 0, sizeof(fbi->var));
1326 memset(&fbi->fix, 0, sizeof(fbi->fix));
1327 strlcpy(fbi->fix.id, MODULE_NAME, sizeof(fbi->fix.id));
1328}
1329
1330static int omapfb_free_all_fbmem(struct omapfb2_device *fbdev)
1331{
1332 int i;
1333
1334 DBG("free all fbmem\n");
1335
1336 for (i = 0; i < fbdev->num_fbs; i++) {
1337 struct fb_info *fbi = fbdev->fbs[i];
1338 omapfb_free_fbmem(fbi);
1339 clear_fb_info(fbi);
1340 }
1341
1342 return 0;
1343}
1344
1345static int omapfb_alloc_fbmem(struct fb_info *fbi, unsigned long size,
1346 unsigned long paddr)
1347{
1348 struct omapfb_info *ofbi = FB2OFB(fbi);
1349 struct omapfb2_device *fbdev = ofbi->fbdev;
1350 struct omapfb2_mem_region *rg;
1351 void __iomem *vaddr;
1352 int r;
1353
1354 rg = &ofbi->region;
1355 memset(rg, 0, sizeof(*rg));
1356
1357 size = PAGE_ALIGN(size);
1358
1359 if (!paddr) {
1360 DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
1361 r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, size, &paddr);
1362 } else {
1363 DBG("reserving %lu bytes at %lx for fb %d\n", size, paddr,
1364 ofbi->id);
1365 r = omap_vram_reserve(paddr, size);
1366 }
1367
1368 if (r) {
1369 dev_err(fbdev->dev, "failed to allocate framebuffer\n");
1370 return -ENOMEM;
1371 }
1372
1373 if (ofbi->rotation_type != OMAP_DSS_ROT_VRFB) {
1374 vaddr = ioremap_wc(paddr, size);
1375
1376 if (!vaddr) {
1377 dev_err(fbdev->dev, "failed to ioremap framebuffer\n");
1378 omap_vram_free(paddr, size);
1379 return -ENOMEM;
1380 }
1381
1382 DBG("allocated VRAM paddr %lx, vaddr %p\n", paddr, vaddr);
1383 } else {
1384 r = omap_vrfb_request_ctx(&rg->vrfb);
1385 if (r) {
1386 dev_err(fbdev->dev, "vrfb create ctx failed\n");
1387 return r;
1388 }
1389
1390 vaddr = NULL;
1391 }
1392
1393 rg->paddr = paddr;
1394 rg->vaddr = vaddr;
1395 rg->size = size;
1396 rg->alloc = 1;
1397
1398 return 0;
1399}
1400
1401/* allocate fbmem using display resolution as reference */
1402static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
1403 unsigned long paddr)
1404{
1405 struct omapfb_info *ofbi = FB2OFB(fbi);
1406 struct omap_dss_device *display;
1407 int bytespp;
1408
1409 display = fb2display(fbi);
1410
1411 if (!display)
1412 return 0;
1413
1414 switch (display->get_recommended_bpp(display)) {
1415 case 16:
1416 bytespp = 2;
1417 break;
1418 case 24:
1419 bytespp = 4;
1420 break;
1421 default:
1422 bytespp = 4;
1423 break;
1424 }
1425
1426 if (!size) {
1427 u16 w, h;
1428
1429 display->get_resolution(display, &w, &h);
1430
1431 if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
1432 size = max(omap_vrfb_min_phys_size(w, h, bytespp),
1433 omap_vrfb_min_phys_size(h, w, bytespp));
1434
1435 DBG("adjusting fb mem size for VRFB, %u -> %lu\n",
1436 w * h * bytespp, size);
1437 } else {
1438 size = w * h * bytespp;
1439 }
1440 }
1441
1442 if (!size)
1443 return 0;
1444
1445 return omapfb_alloc_fbmem(fbi, size, paddr);
1446}
1447
1448static enum omap_color_mode fb_format_to_dss_mode(enum omapfb_color_format fmt)
1449{
1450 enum omap_color_mode mode;
1451
1452 switch (fmt) {
1453 case OMAPFB_COLOR_RGB565:
1454 mode = OMAP_DSS_COLOR_RGB16;
1455 break;
1456 case OMAPFB_COLOR_YUV422:
1457 mode = OMAP_DSS_COLOR_YUV2;
1458 break;
1459 case OMAPFB_COLOR_CLUT_8BPP:
1460 mode = OMAP_DSS_COLOR_CLUT8;
1461 break;
1462 case OMAPFB_COLOR_CLUT_4BPP:
1463 mode = OMAP_DSS_COLOR_CLUT4;
1464 break;
1465 case OMAPFB_COLOR_CLUT_2BPP:
1466 mode = OMAP_DSS_COLOR_CLUT2;
1467 break;
1468 case OMAPFB_COLOR_CLUT_1BPP:
1469 mode = OMAP_DSS_COLOR_CLUT1;
1470 break;
1471 case OMAPFB_COLOR_RGB444:
1472 mode = OMAP_DSS_COLOR_RGB12U;
1473 break;
1474 case OMAPFB_COLOR_YUY422:
1475 mode = OMAP_DSS_COLOR_UYVY;
1476 break;
1477 case OMAPFB_COLOR_ARGB16:
1478 mode = OMAP_DSS_COLOR_ARGB16;
1479 break;
1480 case OMAPFB_COLOR_RGB24U:
1481 mode = OMAP_DSS_COLOR_RGB24U;
1482 break;
1483 case OMAPFB_COLOR_RGB24P:
1484 mode = OMAP_DSS_COLOR_RGB24P;
1485 break;
1486 case OMAPFB_COLOR_ARGB32:
1487 mode = OMAP_DSS_COLOR_ARGB32;
1488 break;
1489 case OMAPFB_COLOR_RGBA32:
1490 mode = OMAP_DSS_COLOR_RGBA32;
1491 break;
1492 case OMAPFB_COLOR_RGBX32:
1493 mode = OMAP_DSS_COLOR_RGBX32;
1494 break;
1495 default:
1496 mode = -EINVAL;
1497 }
1498
1499 return mode;
1500}
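
/*
 * Note that -EINVAL is smuggled back through the enum return value, so
 * callers must check for "mode < 0" before using it; omapfb_fb_init()
 * below does exactly that. This relies on the compiler choosing a
 * signed representation for the enum.
 */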
1501
1502static int omapfb_parse_vram_param(const char *param, int max_entries,
1503 unsigned long *sizes, unsigned long *paddrs)
1504{
1505 int fbnum;
1506 unsigned long size;
1507 unsigned long paddr = 0;
1508 char *p, *start;
1509
1510 start = (char *)param;
1511
1512 while (1) {
1513 p = start;
1514
1515 fbnum = simple_strtoul(p, &p, 10);
1516
1517		if (p == start)
1518 return -EINVAL;
1519
1520 if (*p != ':')
1521 return -EINVAL;
1522
1523 if (fbnum >= max_entries)
1524 return -EINVAL;
1525
1526 size = memparse(p + 1, &p);
1527
1528 if (!size)
1529 return -EINVAL;
1530
1531 paddr = 0;
1532
1533 if (*p == '@') {
1534 paddr = simple_strtoul(p + 1, &p, 16);
1535
1536 if (!paddr)
1537 return -EINVAL;
1538
1539 }
1540
1541 paddrs[fbnum] = paddr;
1542 sizes[fbnum] = size;
1543
1544 if (*p == 0)
1545 break;
1546
1547 if (*p != ',')
1548 return -EINVAL;
1549
1550 ++p;
1551
1552 start = p;
1553 }
1554
1555 return 0;
1556}
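
/*
 * The parser above accepts a comma-separated list of
 * <fbnum>:<size>[@<physaddr>] entries: the size goes through memparse()
 * (so K/M/G suffixes work) and the physical address is parsed as hex.
 * An illustrative kernel command line (the address is made up):
 *
 *	omapfb.vram=0:2M,1:4M@0x80000000
 *
 * reserves 2 MB for fb0 from anywhere in VRAM and 4 MB for fb1 at
 * physical address 0x80000000.
 */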
1557
1558static int omapfb_allocate_all_fbs(struct omapfb2_device *fbdev)
1559{
1560 int i, r;
1561 unsigned long vram_sizes[10];
1562 unsigned long vram_paddrs[10];
1563
1564 memset(&vram_sizes, 0, sizeof(vram_sizes));
1565 memset(&vram_paddrs, 0, sizeof(vram_paddrs));
1566
1567 if (def_vram && omapfb_parse_vram_param(def_vram, 10,
1568 vram_sizes, vram_paddrs)) {
1569 dev_err(fbdev->dev, "failed to parse vram parameter\n");
1570
1571 memset(&vram_sizes, 0, sizeof(vram_sizes));
1572 memset(&vram_paddrs, 0, sizeof(vram_paddrs));
1573 }
1574
1575 if (fbdev->dev->platform_data) {
1576 struct omapfb_platform_data *opd;
1577 opd = fbdev->dev->platform_data;
1578 for (i = 0; i < opd->mem_desc.region_cnt; ++i) {
1579 if (!vram_sizes[i]) {
1580 unsigned long size;
1581 unsigned long paddr;
1582
1583 size = opd->mem_desc.region[i].size;
1584 paddr = opd->mem_desc.region[i].paddr;
1585
1586 vram_sizes[i] = size;
1587 vram_paddrs[i] = paddr;
1588 }
1589 }
1590 }
1591
1592 for (i = 0; i < fbdev->num_fbs; i++) {
1593		/* allocate memory automatically only for fb0, or if
1594		 * explicitly defined with the vram or platform data option */
1595 if (i == 0 || vram_sizes[i] != 0) {
1596 r = omapfb_alloc_fbmem_display(fbdev->fbs[i],
1597 vram_sizes[i], vram_paddrs[i]);
1598
1599 if (r)
1600 return r;
1601 }
1602 }
1603
1604 for (i = 0; i < fbdev->num_fbs; i++) {
1605 struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
1606 struct omapfb2_mem_region *rg;
1607 rg = &ofbi->region;
1608
1609 DBG("region%d phys %08x virt %p size=%lu\n",
1610 i,
1611 rg->paddr,
1612 rg->vaddr,
1613 rg->size);
1614 }
1615
1616 return 0;
1617}
1618
1619int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
1620{
1621 struct omapfb_info *ofbi = FB2OFB(fbi);
1622 struct omapfb2_device *fbdev = ofbi->fbdev;
1623 struct omap_dss_device *display = fb2display(fbi);
1624 struct omapfb2_mem_region *rg = &ofbi->region;
1625 unsigned long old_size = rg->size;
1626 unsigned long old_paddr = rg->paddr;
1627 int old_type = rg->type;
1628 int r;
1629
1630 if (type > OMAPFB_MEMTYPE_MAX)
1631 return -EINVAL;
1632
1633 size = PAGE_ALIGN(size);
1634
1635 if (old_size == size && old_type == type)
1636 return 0;
1637
1638 if (display && display->sync)
1639 display->sync(display);
1640
1641 omapfb_free_fbmem(fbi);
1642
1643 if (size == 0) {
1644 clear_fb_info(fbi);
1645 return 0;
1646 }
1647
1648 r = omapfb_alloc_fbmem(fbi, size, 0);
1649
1650 if (r) {
1651 if (old_size)
1652 omapfb_alloc_fbmem(fbi, old_size, old_paddr);
1653
1654 if (rg->size == 0)
1655 clear_fb_info(fbi);
1656
1657 return r;
1658 }
1659
1660 if (old_size == size)
1661 return 0;
1662
1663 if (old_size == 0) {
1664 DBG("initializing fb %d\n", ofbi->id);
1665 r = omapfb_fb_init(fbdev, fbi);
1666 if (r) {
1667 DBG("omapfb_fb_init failed\n");
1668 goto err;
1669 }
1670 r = omapfb_apply_changes(fbi, 1);
1671 if (r) {
1672 DBG("omapfb_apply_changes failed\n");
1673 goto err;
1674 }
1675 } else {
1676 struct fb_var_screeninfo new_var;
1677 memcpy(&new_var, &fbi->var, sizeof(new_var));
1678 r = check_fb_var(fbi, &new_var);
1679 if (r)
1680 goto err;
1681 memcpy(&fbi->var, &new_var, sizeof(fbi->var));
1682 set_fb_fix(fbi);
1683 r = setup_vrfb_rotation(fbi);
1684 if (r)
1685 goto err;
1686 }
1687
1688 return 0;
1689err:
1690 omapfb_free_fbmem(fbi);
1691 clear_fb_info(fbi);
1692 return r;
1693}
1694
1695/* initialize fb_info, var, fix to something sane based on the display */
1696static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
1697{
1698 struct fb_var_screeninfo *var = &fbi->var;
1699 struct omap_dss_device *display = fb2display(fbi);
1700 struct omapfb_info *ofbi = FB2OFB(fbi);
1701 int r = 0;
1702
1703 fbi->fbops = &omapfb_ops;
1704 fbi->flags = FBINFO_FLAG_DEFAULT;
1705 fbi->pseudo_palette = fbdev->pseudo_palette;
1706
1707 if (ofbi->region.size == 0) {
1708 clear_fb_info(fbi);
1709 return 0;
1710 }
1711
1712 var->nonstd = 0;
1713 var->bits_per_pixel = 0;
1714
1715 var->rotate = def_rotate;
1716
1717 /*
1718 * Check if there is a default color format set in the board file,
1719	 * and use it instead of the default deduced from the
1720 * display bpp.
1721 */
1722 if (fbdev->dev->platform_data) {
1723 struct omapfb_platform_data *opd;
1724 int id = ofbi->id;
1725
1726 opd = fbdev->dev->platform_data;
1727 if (opd->mem_desc.region[id].format_used) {
1728 enum omap_color_mode mode;
1729 enum omapfb_color_format format;
1730
1731 format = opd->mem_desc.region[id].format;
1732 mode = fb_format_to_dss_mode(format);
1733 if (mode < 0) {
1734 r = mode;
1735 goto err;
1736 }
1737 r = dss_mode_to_fb_mode(mode, var);
1738 if (r < 0)
1739 goto err;
1740 }
1741 }
1742
1743 if (display) {
1744 u16 w, h;
1745 int rotation = (var->rotate + ofbi->rotation[0]) % 4;
1746
1747 display->get_resolution(display, &w, &h);
1748
1749 if (rotation == FB_ROTATE_CW ||
1750 rotation == FB_ROTATE_CCW) {
1751 var->xres = h;
1752 var->yres = w;
1753 } else {
1754 var->xres = w;
1755 var->yres = h;
1756 }
1757
1758 var->xres_virtual = var->xres;
1759 var->yres_virtual = var->yres;
1760
1761 if (!var->bits_per_pixel) {
1762 switch (display->get_recommended_bpp(display)) {
1763 case 16:
1764 var->bits_per_pixel = 16;
1765 break;
1766 case 24:
1767 var->bits_per_pixel = 32;
1768 break;
1769 default:
1770 dev_err(fbdev->dev, "illegal display "
1771 "bpp\n");
1772 return -EINVAL;
1773 }
1774 }
1775 } else {
1776 /* if there's no display, let's just guess some basic values */
1777 var->xres = 320;
1778 var->yres = 240;
1779 var->xres_virtual = var->xres;
1780 var->yres_virtual = var->yres;
1781 if (!var->bits_per_pixel)
1782 var->bits_per_pixel = 16;
1783 }
1784
1785 r = check_fb_var(fbi, var);
1786 if (r)
1787 goto err;
1788
1789 set_fb_fix(fbi);
1790 r = setup_vrfb_rotation(fbi);
1791 if (r)
1792 goto err;
1793
1794 r = fb_alloc_cmap(&fbi->cmap, 256, 0);
1795 if (r)
1796 dev_err(fbdev->dev, "unable to allocate color map memory\n");
1797
1798err:
1799 return r;
1800}
1801
1802static void fbinfo_cleanup(struct omapfb2_device *fbdev, struct fb_info *fbi)
1803{
1804 fb_dealloc_cmap(&fbi->cmap);
1805}
1806
1807
1808static void omapfb_free_resources(struct omapfb2_device *fbdev)
1809{
1810 int i;
1811
1812 DBG("free_resources\n");
1813
1814 if (fbdev == NULL)
1815 return;
1816
1817 for (i = 0; i < fbdev->num_fbs; i++)
1818 unregister_framebuffer(fbdev->fbs[i]);
1819
1820 /* free the reserved fbmem */
1821 omapfb_free_all_fbmem(fbdev);
1822
1823 for (i = 0; i < fbdev->num_fbs; i++) {
1824 fbinfo_cleanup(fbdev, fbdev->fbs[i]);
1825 framebuffer_release(fbdev->fbs[i]);
1826 }
1827
1828 for (i = 0; i < fbdev->num_displays; i++) {
1829 if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
1830 fbdev->displays[i]->disable(fbdev->displays[i]);
1831
1832 omap_dss_put_device(fbdev->displays[i]);
1833 }
1834
1835 dev_set_drvdata(fbdev->dev, NULL);
1836 kfree(fbdev);
1837}
1838
1839static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
1840{
1841 int r, i;
1842
1843 fbdev->num_fbs = 0;
1844
1845 DBG("create %d framebuffers\n", CONFIG_FB_OMAP2_NUM_FBS);
1846
1847 /* allocate fb_infos */
1848 for (i = 0; i < CONFIG_FB_OMAP2_NUM_FBS; i++) {
1849 struct fb_info *fbi;
1850 struct omapfb_info *ofbi;
1851
1852 fbi = framebuffer_alloc(sizeof(struct omapfb_info),
1853 fbdev->dev);
1854
1855 if (fbi == NULL) {
1856 dev_err(fbdev->dev,
1857 "unable to allocate memory for plane info\n");
1858 return -ENOMEM;
1859 }
1860
1861 clear_fb_info(fbi);
1862
1863 fbdev->fbs[i] = fbi;
1864
1865 ofbi = FB2OFB(fbi);
1866 ofbi->fbdev = fbdev;
1867 ofbi->id = i;
1868
1869 /* assign these early, so that fb alloc can use them */
1870 ofbi->rotation_type = def_vrfb ? OMAP_DSS_ROT_VRFB :
1871 OMAP_DSS_ROT_DMA;
1872 ofbi->mirror = def_mirror;
1873
1874 fbdev->num_fbs++;
1875 }
1876
1877 DBG("fb_infos allocated\n");
1878
1879 /* assign overlays for the fbs */
1880 for (i = 0; i < min(fbdev->num_fbs, fbdev->num_overlays); i++) {
1881 struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
1882
1883 ofbi->overlays[0] = fbdev->overlays[i];
1884 ofbi->num_overlays = 1;
1885 }
1886
1887 /* allocate fb memories */
1888 r = omapfb_allocate_all_fbs(fbdev);
1889 if (r) {
1890 dev_err(fbdev->dev, "failed to allocate fbmem\n");
1891 return r;
1892 }
1893
1894 DBG("fbmems allocated\n");
1895
1896 /* setup fb_infos */
1897 for (i = 0; i < fbdev->num_fbs; i++) {
1898 r = omapfb_fb_init(fbdev, fbdev->fbs[i]);
1899 if (r) {
1900 dev_err(fbdev->dev, "failed to setup fb_info\n");
1901 return r;
1902 }
1903 }
1904
1905 DBG("fb_infos initialized\n");
1906
1907 for (i = 0; i < fbdev->num_fbs; i++) {
1908 r = register_framebuffer(fbdev->fbs[i]);
1909 if (r != 0) {
1910 dev_err(fbdev->dev,
1911 "registering framebuffer %d failed\n", i);
1912 return r;
1913 }
1914 }
1915
1916 DBG("framebuffers registered\n");
1917
1918 for (i = 0; i < fbdev->num_fbs; i++) {
1919 r = omapfb_apply_changes(fbdev->fbs[i], 1);
1920 if (r) {
1921 dev_err(fbdev->dev, "failed to change mode\n");
1922 return r;
1923 }
1924 }
1925
1926 DBG("create sysfs for fbs\n");
1927 r = omapfb_create_sysfs(fbdev);
1928 if (r) {
1929 dev_err(fbdev->dev, "failed to create sysfs entries\n");
1930 return r;
1931 }
1932
1933 /* Enable fb0 */
1934 if (fbdev->num_fbs > 0) {
1935 struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[0]);
1936
1937 if (ofbi->num_overlays > 0) {
1938 struct omap_overlay *ovl = ofbi->overlays[0];
1939
1940 r = omapfb_overlay_enable(ovl, 1);
1941
1942 if (r) {
1943 dev_err(fbdev->dev,
1944 "failed to enable overlay\n");
1945 return r;
1946 }
1947 }
1948 }
1949
1950 DBG("create_framebuffers done\n");
1951
1952 return 0;
1953}
1954
1955static int omapfb_mode_to_timings(const char *mode_str,
1956 struct omap_video_timings *timings, u8 *bpp)
1957{
1958 struct fb_info fbi;
1959 struct fb_var_screeninfo var;
1960 struct fb_ops fbops;
1961 int r;
1962
1963#ifdef CONFIG_OMAP2_DSS_VENC
1964 if (strcmp(mode_str, "pal") == 0) {
1965 *timings = omap_dss_pal_timings;
1966 *bpp = 0;
1967 return 0;
1968 } else if (strcmp(mode_str, "ntsc") == 0) {
1969 *timings = omap_dss_ntsc_timings;
1970 *bpp = 0;
1971 return 0;
1972 }
1973#endif
1974
1975 /* this is quite a hack, but I wanted to use the modedb and for
1976 * that we need fb_info and var, so we create dummy ones */
1977
1978 memset(&fbi, 0, sizeof(fbi));
1979 memset(&var, 0, sizeof(var));
1980 memset(&fbops, 0, sizeof(fbops));
1981 fbi.fbops = &fbops;
1982
1983 r = fb_find_mode(&var, &fbi, mode_str, NULL, 0, NULL, 24);
1984
1985 if (r != 0) {
1986 timings->pixel_clock = PICOS2KHZ(var.pixclock);
1987 timings->hfp = var.left_margin;
1988 timings->hbp = var.right_margin;
1989 timings->vfp = var.upper_margin;
1990 timings->vbp = var.lower_margin;
1991 timings->hsw = var.hsync_len;
1992 timings->vsw = var.vsync_len;
1993 timings->x_res = var.xres;
1994 timings->y_res = var.yres;
1995
1996 switch (var.bits_per_pixel) {
1997 case 16:
1998 *bpp = 16;
1999 break;
2000 case 24:
2001 case 32:
2002 default:
2003 *bpp = 24;
2004 break;
2005 }
2006
2007 return 0;
2008 } else {
2009 return -EINVAL;
2010 }
2011}
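
/*
 * Mode strings are either "pal"/"ntsc" (when VENC support is enabled)
 * or anything the fb modedb understands, e.g. a string of the form
 * "800x480-24@60" (an illustrative value, not a tested one). Note that
 * fb_find_mode() returns 0 only when no usable mode was found, which is
 * why success is the "r != 0" branch above.
 */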
2012
2013static int omapfb_set_def_mode(struct omap_dss_device *display, char *mode_str)
2014{
2015 int r;
2016 u8 bpp;
2017 struct omap_video_timings timings;
2018
2019 r = omapfb_mode_to_timings(mode_str, &timings, &bpp);
2020 if (r)
2021 return r;
2022
2023 display->panel.recommended_bpp = bpp;
2024
2025 if (!display->check_timings || !display->set_timings)
2026 return -EINVAL;
2027
2028 r = display->check_timings(display, &timings);
2029 if (r)
2030 return r;
2031
2032 display->set_timings(display, &timings);
2033
2034 return 0;
2035}
2036
2037static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
2038{
2039 char *str, *options, *this_opt;
2040 int r = 0;
2041
2042	str = kstrdup(def_mode, GFP_KERNEL);
2043	if (!str)
2044		return -ENOMEM;
2044 options = str;
2045
2046 while (!r && (this_opt = strsep(&options, ",")) != NULL) {
2047 char *p, *display_str, *mode_str;
2048 struct omap_dss_device *display;
2049 int i;
2050
2051 p = strchr(this_opt, ':');
2052 if (!p) {
2053 r = -EINVAL;
2054 break;
2055 }
2056
2057 *p = 0;
2058 display_str = this_opt;
2059 mode_str = p + 1;
2060
2061 display = NULL;
2062 for (i = 0; i < fbdev->num_displays; ++i) {
2063 if (strcmp(fbdev->displays[i]->name,
2064 display_str) == 0) {
2065 display = fbdev->displays[i];
2066 break;
2067 }
2068 }
2069
2070 if (!display) {
2071 r = -EINVAL;
2072 break;
2073 }
2074
2075 r = omapfb_set_def_mode(display, mode_str);
2076 if (r)
2077 break;
2078 }
2079
2080 kfree(str);
2081
2082 return r;
2083}
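
/*
 * def_mode pairs a display name with a mode string, comma-separated:
 *
 *	<display>:<mode>[,<display>:<mode>...]
 *
 * e.g. "lcd:800x480-24@60,tv:pal". The display names here are
 * assumptions; the real names come from the DSS display drivers.
 */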
2084
2085static int omapfb_probe(struct platform_device *pdev)
2086{
2087 struct omapfb2_device *fbdev = NULL;
2088 int r = 0;
2089 int i;
2090 struct omap_overlay *ovl;
2091 struct omap_dss_device *def_display;
2092 struct omap_dss_device *dssdev;
2093
2094 DBG("omapfb_probe\n");
2095
2096 if (pdev->num_resources != 0) {
2097 dev_err(&pdev->dev, "probed for an unknown device\n");
2098 r = -ENODEV;
2099 goto err0;
2100 }
2101
2102 fbdev = kzalloc(sizeof(struct omapfb2_device), GFP_KERNEL);
2103 if (fbdev == NULL) {
2104 r = -ENOMEM;
2105 goto err0;
2106 }
2107
2108 mutex_init(&fbdev->mtx);
2109
2110 fbdev->dev = &pdev->dev;
2111 platform_set_drvdata(pdev, fbdev);
2112
2113 fbdev->num_displays = 0;
2114 dssdev = NULL;
2115 for_each_dss_dev(dssdev) {
2116 omap_dss_get_device(dssdev);
2117 fbdev->displays[fbdev->num_displays++] = dssdev;
2118 }
2119
2120 if (fbdev->num_displays == 0) {
2121 dev_err(&pdev->dev, "no displays\n");
2122 r = -EINVAL;
2123 goto cleanup;
2124 }
2125
2126 fbdev->num_overlays = omap_dss_get_num_overlays();
2127 for (i = 0; i < fbdev->num_overlays; i++)
2128 fbdev->overlays[i] = omap_dss_get_overlay(i);
2129
2130 fbdev->num_managers = omap_dss_get_num_overlay_managers();
2131 for (i = 0; i < fbdev->num_managers; i++)
2132 fbdev->managers[i] = omap_dss_get_overlay_manager(i);
2133
2134 if (def_mode && strlen(def_mode) > 0) {
2135 if (omapfb_parse_def_modes(fbdev))
2136 dev_warn(&pdev->dev, "cannot parse default modes\n");
2137 }
2138
2139 r = omapfb_create_framebuffers(fbdev);
2140 if (r)
2141 goto cleanup;
2142
2143 for (i = 0; i < fbdev->num_managers; i++) {
2144 struct omap_overlay_manager *mgr;
2145 mgr = fbdev->managers[i];
2146 r = mgr->apply(mgr);
2147 if (r)
2148 dev_warn(fbdev->dev, "failed to apply dispc config\n");
2149 }
2150
2151 DBG("mgr->apply'ed\n");
2152
2153 /* gfx overlay should be the default one. find a display
2154 * connected to that, and use it as default display */
2155 ovl = omap_dss_get_overlay(0);
2156 if (ovl->manager && ovl->manager->device) {
2157 def_display = ovl->manager->device;
2158 } else {
2159 dev_warn(&pdev->dev, "cannot find default display\n");
2160 def_display = NULL;
2161 }
2162
2163 if (def_display) {
2164#ifndef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
2165 u16 w, h;
2166#endif
2167 r = def_display->enable(def_display);
2168 if (r)
2169 dev_warn(fbdev->dev, "Failed to enable display '%s'\n",
2170 def_display->name);
2171
2172 /* set the update mode */
2173 if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
2174#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
2175 if (def_display->enable_te)
2176 def_display->enable_te(def_display, 1);
2177 if (def_display->set_update_mode)
2178 def_display->set_update_mode(def_display,
2179 OMAP_DSS_UPDATE_AUTO);
2180#else /* MANUAL_UPDATE */
2181 if (def_display->enable_te)
2182 def_display->enable_te(def_display, 0);
2183 if (def_display->set_update_mode)
2184 def_display->set_update_mode(def_display,
2185 OMAP_DSS_UPDATE_MANUAL);
2186
2187 def_display->get_resolution(def_display, &w, &h);
2188 def_display->update(def_display, 0, 0, w, h);
2189#endif
2190 } else {
2191 if (def_display->set_update_mode)
2192 def_display->set_update_mode(def_display,
2193 OMAP_DSS_UPDATE_AUTO);
2194 }
2195 }
2196
2197 return 0;
2198
2199cleanup:
2200 omapfb_free_resources(fbdev);
2201err0:
2202 dev_err(&pdev->dev, "failed to setup omapfb\n");
2203 return r;
2204}
2205
2206static int omapfb_remove(struct platform_device *pdev)
2207{
2208 struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
2209
2210 /* FIXME: wait till completion of pending events */
2211
2212 omapfb_remove_sysfs(fbdev);
2213
2214 omapfb_free_resources(fbdev);
2215
2216 return 0;
2217}
2218
2219static struct platform_driver omapfb_driver = {
2220 .probe = omapfb_probe,
2221 .remove = omapfb_remove,
2222 .driver = {
2223 .name = "omapfb",
2224 .owner = THIS_MODULE,
2225 },
2226};
2227
2228static int __init omapfb_init(void)
2229{
2230 DBG("omapfb_init\n");
2231
2232 if (platform_driver_register(&omapfb_driver)) {
2233 printk(KERN_ERR "failed to register omapfb driver\n");
2234 return -ENODEV;
2235 }
2236
2237 return 0;
2238}
2239
2240static void __exit omapfb_exit(void)
2241{
2242 DBG("omapfb_exit\n");
2243 platform_driver_unregister(&omapfb_driver);
2244}
2245
2246module_param_named(mode, def_mode, charp, 0);
2247module_param_named(vram, def_vram, charp, 0);
2248module_param_named(rotate, def_rotate, int, 0);
2249module_param_named(vrfb, def_vrfb, bool, 0);
2250module_param_named(mirror, def_mirror, bool, 0);
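
/*
 * Putting the parameters together, an illustrative load line (all
 * values are assumptions, not recommendations):
 *
 *	modprobe omapfb mode=lcd:800x480-24@60 vram=0:4M rotate=1 vrfb=1
 *
 * or, with omapfb built in, the same options prefixed with "omapfb."
 * on the kernel command line.
 */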
2251
2252/* late_initcall so that panel/ctrl drivers are loaded first.
2253 * A better option would be a more dynamic approach, so that
2254 * omapfb reacts to new panels when they are loaded */
2255late_initcall(omapfb_init);
2256/*module_init(omapfb_init);*/
2257module_exit(omapfb_exit);
2258
2259MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
2260MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
2261MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
new file mode 100644
index 000000000000..62bb88f5c192
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -0,0 +1,507 @@
1/*
2 * linux/drivers/video/omap2/omapfb-sysfs.c
3 *
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#include <linux/fb.h>
24#include <linux/sysfs.h>
25#include <linux/device.h>
26#include <linux/uaccess.h>
27#include <linux/platform_device.h>
28#include <linux/kernel.h>
29#include <linux/mm.h>
30#include <linux/omapfb.h>
31
32#include <plat/display.h>
33#include <plat/vrfb.h>
34
35#include "omapfb.h"
36
37static ssize_t show_rotate_type(struct device *dev,
38 struct device_attribute *attr, char *buf)
39{
40 struct fb_info *fbi = dev_get_drvdata(dev);
41 struct omapfb_info *ofbi = FB2OFB(fbi);
42
43 return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->rotation_type);
44}
45
46static ssize_t store_rotate_type(struct device *dev,
47 struct device_attribute *attr,
48 const char *buf, size_t count)
49{
50 struct fb_info *fbi = dev_get_drvdata(dev);
51 struct omapfb_info *ofbi = FB2OFB(fbi);
52 enum omap_dss_rotation_type rot_type;
53 int r;
54
55 rot_type = simple_strtoul(buf, NULL, 0);
56
57 if (rot_type != OMAP_DSS_ROT_DMA && rot_type != OMAP_DSS_ROT_VRFB)
58 return -EINVAL;
59
60 lock_fb_info(fbi);
61
62 r = 0;
63 if (rot_type == ofbi->rotation_type)
64 goto out;
65
66 if (ofbi->region.size) {
67 r = -EBUSY;
68 goto out;
69 }
70
71 ofbi->rotation_type = rot_type;
72
73 /*
74 * Since the VRAM for this FB is not allocated at the moment we don't
75 * need to do any further parameter checking at this point.
76 */
77out:
78 unlock_fb_info(fbi);
79
80 return r ? r : count;
81}
82
83
84static ssize_t show_mirror(struct device *dev,
85 struct device_attribute *attr, char *buf)
86{
87 struct fb_info *fbi = dev_get_drvdata(dev);
88 struct omapfb_info *ofbi = FB2OFB(fbi);
89
90 return snprintf(buf, PAGE_SIZE, "%d\n", ofbi->mirror);
91}
92
93static ssize_t store_mirror(struct device *dev,
94 struct device_attribute *attr,
95 const char *buf, size_t count)
96{
97 struct fb_info *fbi = dev_get_drvdata(dev);
98 struct omapfb_info *ofbi = FB2OFB(fbi);
99	unsigned long mirror;
100 int r;
101 struct fb_var_screeninfo new_var;
102
103 mirror = simple_strtoul(buf, NULL, 0);
104
105 if (mirror != 0 && mirror != 1)
106 return -EINVAL;
107
108 lock_fb_info(fbi);
109
110 ofbi->mirror = mirror;
111
112 memcpy(&new_var, &fbi->var, sizeof(new_var));
113 r = check_fb_var(fbi, &new_var);
114 if (r)
115 goto out;
116 memcpy(&fbi->var, &new_var, sizeof(fbi->var));
117
118 set_fb_fix(fbi);
119
120 r = omapfb_apply_changes(fbi, 0);
121 if (r)
122 goto out;
123
124 r = count;
125out:
126 unlock_fb_info(fbi);
127
128 return r;
129}
130
131static ssize_t show_overlays(struct device *dev,
132 struct device_attribute *attr, char *buf)
133{
134 struct fb_info *fbi = dev_get_drvdata(dev);
135 struct omapfb_info *ofbi = FB2OFB(fbi);
136 struct omapfb2_device *fbdev = ofbi->fbdev;
137 ssize_t l = 0;
138 int t;
139
140 omapfb_lock(fbdev);
141 lock_fb_info(fbi);
142
143 for (t = 0; t < ofbi->num_overlays; t++) {
144 struct omap_overlay *ovl = ofbi->overlays[t];
145 int ovlnum;
146
147 for (ovlnum = 0; ovlnum < fbdev->num_overlays; ++ovlnum)
148 if (ovl == fbdev->overlays[ovlnum])
149 break;
150
151 l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
152 t == 0 ? "" : ",", ovlnum);
153 }
154
155 l += snprintf(buf + l, PAGE_SIZE - l, "\n");
156
157 unlock_fb_info(fbi);
158 omapfb_unlock(fbdev);
159
160 return l;
161}
162
163static struct omapfb_info *get_overlay_fb(struct omapfb2_device *fbdev,
164 struct omap_overlay *ovl)
165{
166 int i, t;
167
168 for (i = 0; i < fbdev->num_fbs; i++) {
169 struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]);
170
171 for (t = 0; t < ofbi->num_overlays; t++) {
172 if (ofbi->overlays[t] == ovl)
173 return ofbi;
174 }
175 }
176
177 return NULL;
178}
179
180static ssize_t store_overlays(struct device *dev, struct device_attribute *attr,
181 const char *buf, size_t count)
182{
183 struct fb_info *fbi = dev_get_drvdata(dev);
184 struct omapfb_info *ofbi = FB2OFB(fbi);
185 struct omapfb2_device *fbdev = ofbi->fbdev;
186 struct omap_overlay *ovls[OMAPFB_MAX_OVL_PER_FB];
187 struct omap_overlay *ovl;
188 int num_ovls, r, i;
189 int len;
190 bool added = false;
191
192 num_ovls = 0;
193
194	len = strlen(buf);
195	if (len > 0 && buf[len - 1] == '\n')
196		len = len - 1;
197
198 omapfb_lock(fbdev);
199 lock_fb_info(fbi);
200
201 if (len > 0) {
202 char *p = (char *)buf;
203 int ovlnum;
204
205 while (p < buf + len) {
206 int found;
207 if (num_ovls == OMAPFB_MAX_OVL_PER_FB) {
208 r = -EINVAL;
209 goto out;
210 }
211
212 ovlnum = simple_strtoul(p, &p, 0);
213			if (ovlnum >= fbdev->num_overlays) {
214 r = -EINVAL;
215 goto out;
216 }
217
218 found = 0;
219 for (i = 0; i < num_ovls; ++i) {
220 if (ovls[i] == fbdev->overlays[ovlnum]) {
221 found = 1;
222 break;
223 }
224 }
225
226 if (!found)
227 ovls[num_ovls++] = fbdev->overlays[ovlnum];
228
229 p++;
230 }
231 }
232
233 for (i = 0; i < num_ovls; ++i) {
234 struct omapfb_info *ofbi2 = get_overlay_fb(fbdev, ovls[i]);
235 if (ofbi2 && ofbi2 != ofbi) {
236 dev_err(fbdev->dev, "overlay already in use\n");
237 r = -EINVAL;
238 goto out;
239 }
240 }
241
242 /* detach unused overlays */
243 for (i = 0; i < ofbi->num_overlays; ++i) {
244 int t, found;
245
246 ovl = ofbi->overlays[i];
247
248 found = 0;
249
250 for (t = 0; t < num_ovls; ++t) {
251 if (ovl == ovls[t]) {
252 found = 1;
253 break;
254 }
255 }
256
257 if (found)
258 continue;
259
260 DBG("detaching %d\n", ofbi->overlays[i]->id);
261
262 omapfb_overlay_enable(ovl, 0);
263
264 if (ovl->manager)
265 ovl->manager->apply(ovl->manager);
266
267 for (t = i + 1; t < ofbi->num_overlays; t++) {
268 ofbi->rotation[t-1] = ofbi->rotation[t];
269 ofbi->overlays[t-1] = ofbi->overlays[t];
270 }
271
272 ofbi->num_overlays--;
273 i--;
274 }
275
276 for (i = 0; i < num_ovls; ++i) {
277 int t, found;
278
279 ovl = ovls[i];
280
281 found = 0;
282
283 for (t = 0; t < ofbi->num_overlays; ++t) {
284 if (ovl == ofbi->overlays[t]) {
285 found = 1;
286 break;
287 }
288 }
289
290 if (found)
291 continue;
292 ofbi->rotation[ofbi->num_overlays] = 0;
293 ofbi->overlays[ofbi->num_overlays++] = ovl;
294
295 added = true;
296 }
297
298 if (added) {
299 r = omapfb_apply_changes(fbi, 0);
300 if (r)
301 goto out;
302 }
303
304 r = count;
305out:
306 unlock_fb_info(fbi);
307 omapfb_unlock(fbdev);
308
309 return r;
310}
311
312static ssize_t show_overlays_rotate(struct device *dev,
313 struct device_attribute *attr, char *buf)
314{
315 struct fb_info *fbi = dev_get_drvdata(dev);
316 struct omapfb_info *ofbi = FB2OFB(fbi);
317 ssize_t l = 0;
318 int t;
319
320 lock_fb_info(fbi);
321
322 for (t = 0; t < ofbi->num_overlays; t++) {
323 l += snprintf(buf + l, PAGE_SIZE - l, "%s%d",
324 t == 0 ? "" : ",", ofbi->rotation[t]);
325 }
326
327 l += snprintf(buf + l, PAGE_SIZE - l, "\n");
328
329 unlock_fb_info(fbi);
330
331 return l;
332}
333
334static ssize_t store_overlays_rotate(struct device *dev,
335 struct device_attribute *attr, const char *buf, size_t count)
336{
337 struct fb_info *fbi = dev_get_drvdata(dev);
338 struct omapfb_info *ofbi = FB2OFB(fbi);
339 int num_ovls = 0, r, i;
340 int len;
341 bool changed = false;
342 u8 rotation[OMAPFB_MAX_OVL_PER_FB];
343
344	len = strlen(buf);
345	if (len > 0 && buf[len - 1] == '\n')
346		len = len - 1;
347
348 lock_fb_info(fbi);
349
350 if (len > 0) {
351 char *p = (char *)buf;
352
353 while (p < buf + len) {
354 int rot;
355
356 if (num_ovls == ofbi->num_overlays) {
357 r = -EINVAL;
358 goto out;
359 }
360
361 rot = simple_strtoul(p, &p, 0);
362 if (rot < 0 || rot > 3) {
363 r = -EINVAL;
364 goto out;
365 }
366
367 if (ofbi->rotation[num_ovls] != rot)
368 changed = true;
369
370 rotation[num_ovls++] = rot;
371
372 p++;
373 }
374 }
375
376 if (num_ovls != ofbi->num_overlays) {
377 r = -EINVAL;
378 goto out;
379 }
380
381 if (changed) {
382 for (i = 0; i < num_ovls; ++i)
383 ofbi->rotation[i] = rotation[i];
384
385 r = omapfb_apply_changes(fbi, 0);
386 if (r)
387 goto out;
388
389 /* FIXME error handling? */
390 }
391
392 r = count;
393out:
394 unlock_fb_info(fbi);
395
396 return r;
397}
398
399static ssize_t show_size(struct device *dev,
400 struct device_attribute *attr, char *buf)
401{
402 struct fb_info *fbi = dev_get_drvdata(dev);
403 struct omapfb_info *ofbi = FB2OFB(fbi);
404
405 return snprintf(buf, PAGE_SIZE, "%lu\n", ofbi->region.size);
406}
407
408static ssize_t store_size(struct device *dev, struct device_attribute *attr,
409 const char *buf, size_t count)
410{
411 struct fb_info *fbi = dev_get_drvdata(dev);
412 struct omapfb_info *ofbi = FB2OFB(fbi);
413 unsigned long size;
414 int r;
415 int i;
416
417 size = PAGE_ALIGN(simple_strtoul(buf, NULL, 0));
418
419 lock_fb_info(fbi);
420
421 for (i = 0; i < ofbi->num_overlays; i++) {
422 if (ofbi->overlays[i]->info.enabled) {
423 r = -EBUSY;
424 goto out;
425 }
426 }
427
428 if (size != ofbi->region.size) {
429 r = omapfb_realloc_fbmem(fbi, size, ofbi->region.type);
430 if (r) {
431 dev_err(dev, "realloc fbmem failed\n");
432 goto out;
433 }
434 }
435
436 r = count;
437out:
438 unlock_fb_info(fbi);
439
440 return r;
441}
442
443static ssize_t show_phys(struct device *dev,
444 struct device_attribute *attr, char *buf)
445{
446 struct fb_info *fbi = dev_get_drvdata(dev);
447 struct omapfb_info *ofbi = FB2OFB(fbi);
448
449	return snprintf(buf, PAGE_SIZE, "%x\n", ofbi->region.paddr);
450}
451
452static ssize_t show_virt(struct device *dev,
453 struct device_attribute *attr, char *buf)
454{
455 struct fb_info *fbi = dev_get_drvdata(dev);
456 struct omapfb_info *ofbi = FB2OFB(fbi);
457
458 return snprintf(buf, PAGE_SIZE, "%p\n", ofbi->region.vaddr);
459}
460
461static struct device_attribute omapfb_attrs[] = {
462 __ATTR(rotate_type, S_IRUGO | S_IWUSR, show_rotate_type,
463 store_rotate_type),
464 __ATTR(mirror, S_IRUGO | S_IWUSR, show_mirror, store_mirror),
465 __ATTR(size, S_IRUGO | S_IWUSR, show_size, store_size),
466 __ATTR(overlays, S_IRUGO | S_IWUSR, show_overlays, store_overlays),
467 __ATTR(overlays_rotate, S_IRUGO | S_IWUSR, show_overlays_rotate,
468 store_overlays_rotate),
469 __ATTR(phys_addr, S_IRUGO, show_phys, NULL),
470 __ATTR(virt_addr, S_IRUGO, show_virt, NULL),
471};
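
/*
 * These attributes appear under each framebuffer's device directory,
 * typically /sys/class/graphics/fbN/. A minimal shell sketch (overlay
 * numbers and the size are assumptions):
 *
 *	echo 0,1 > /sys/class/graphics/fb0/overlays
 *	echo 4194304 > /sys/class/graphics/fb0/size
 *	cat /sys/class/graphics/fb0/phys_addr
 *
 * Writes can fail with -EBUSY while the framebuffer is in use, as the
 * store handlers above enforce.
 */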
472
473int omapfb_create_sysfs(struct omapfb2_device *fbdev)
474{
475 int i;
476 int r;
477
478 DBG("create sysfs for fbs\n");
479 for (i = 0; i < fbdev->num_fbs; i++) {
480 int t;
481 for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++) {
482 r = device_create_file(fbdev->fbs[i]->dev,
483 &omapfb_attrs[t]);
484
485 if (r) {
486 dev_err(fbdev->dev, "failed to create sysfs "
487 "file\n");
488 return r;
489 }
490 }
491 }
492
493 return 0;
494}
495
496void omapfb_remove_sysfs(struct omapfb2_device *fbdev)
497{
498 int i, t;
499
500 DBG("remove sysfs for fbs\n");
501 for (i = 0; i < fbdev->num_fbs; i++) {
502 for (t = 0; t < ARRAY_SIZE(omapfb_attrs); t++)
503 device_remove_file(fbdev->fbs[i]->dev,
504 &omapfb_attrs[t]);
505 }
506}
507
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
new file mode 100644
index 000000000000..f7c9c739e5ef
--- /dev/null
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -0,0 +1,146 @@
1/*
2 * linux/drivers/video/omap2/omapfb.h
3 *
4 * Copyright (C) 2008 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * Some code and ideas taken from drivers/video/omap/ driver
8 * by Imre Deak.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program. If not, see <http://www.gnu.org/licenses/>.
21 */
22
23#ifndef __DRIVERS_VIDEO_OMAP2_OMAPFB_H__
24#define __DRIVERS_VIDEO_OMAP2_OMAPFB_H__
25
26#ifdef CONFIG_FB_OMAP2_DEBUG_SUPPORT
27#define DEBUG
28#endif
29
30#include <plat/display.h>
31
32#ifdef DEBUG
33extern unsigned int omapfb_debug;
34#define DBG(format, ...) \
35	do { if (omapfb_debug) \
36		printk(KERN_DEBUG "OMAPFB: " format, ## __VA_ARGS__); } while (0)
37#else
38#define DBG(format, ...)
39#endif
40
41#define FB2OFB(fb_info) ((struct omapfb_info *)(fb_info->par))
42
43/* max number of overlays to which framebuffer data can be directed */
44#define OMAPFB_MAX_OVL_PER_FB 3
45
46struct omapfb2_mem_region {
47 u32 paddr;
48 void __iomem *vaddr;
49 struct vrfb vrfb;
50 unsigned long size;
51 u8 type; /* OMAPFB_PLANE_MEM_* */
52 bool alloc; /* allocated by the driver */
53 bool map; /* kernel mapped by the driver */
54};
55
56/* appended to fb_info */
57struct omapfb_info {
58 int id;
59 struct omapfb2_mem_region region;
60 atomic_t map_count;
61 int num_overlays;
62 struct omap_overlay *overlays[OMAPFB_MAX_OVL_PER_FB];
63 struct omapfb2_device *fbdev;
64 enum omap_dss_rotation_type rotation_type;
65 u8 rotation[OMAPFB_MAX_OVL_PER_FB];
66 bool mirror;
67};
68
69struct omapfb2_device {
70 struct device *dev;
71 struct mutex mtx;
72
73 u32 pseudo_palette[17];
74
75 int state;
76
77 unsigned num_fbs;
78 struct fb_info *fbs[10];
79
80 unsigned num_displays;
81 struct omap_dss_device *displays[10];
82 unsigned num_overlays;
83 struct omap_overlay *overlays[10];
84 unsigned num_managers;
85 struct omap_overlay_manager *managers[10];
86};
87
88struct omapfb_colormode {
89 enum omap_color_mode dssmode;
90 u32 bits_per_pixel;
91 u32 nonstd;
92 struct fb_bitfield red;
93 struct fb_bitfield green;
94 struct fb_bitfield blue;
95 struct fb_bitfield transp;
96};
97
98void set_fb_fix(struct fb_info *fbi);
99int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var);
100int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type);
101int omapfb_apply_changes(struct fb_info *fbi, int init);
102
103int omapfb_create_sysfs(struct omapfb2_device *fbdev);
104void omapfb_remove_sysfs(struct omapfb2_device *fbdev);
105
106int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg);
107
108int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
109 struct fb_var_screeninfo *var);
110
111/* find the display connected to this fb, if any */
112static inline struct omap_dss_device *fb2display(struct fb_info *fbi)
113{
114 struct omapfb_info *ofbi = FB2OFB(fbi);
115 int i;
116
117 /* XXX: returns the display connected to first attached overlay */
118 for (i = 0; i < ofbi->num_overlays; i++) {
119 if (ofbi->overlays[i]->manager)
120 return ofbi->overlays[i]->manager->device;
121 }
122
123 return NULL;
124}
125
126static inline void omapfb_lock(struct omapfb2_device *fbdev)
127{
128 mutex_lock(&fbdev->mtx);
129}
130
131static inline void omapfb_unlock(struct omapfb2_device *fbdev)
132{
133 mutex_unlock(&fbdev->mtx);
134}
135
136static inline int omapfb_overlay_enable(struct omap_overlay *ovl,
137 int enable)
138{
139 struct omap_overlay_info info;
140
141 ovl->get_overlay_info(ovl, &info);
142 info.enabled = enable;
143 return ovl->set_overlay_info(ovl, &info);
144}
145
146#endif
diff --git a/drivers/video/omap2/vram.c b/drivers/video/omap2/vram.c
new file mode 100644
index 000000000000..55a4de5e5d10
--- /dev/null
+++ b/drivers/video/omap2/vram.c
@@ -0,0 +1,655 @@
1/*
2 * VRAM manager for OMAP
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*#define DEBUG*/
22
23#include <linux/kernel.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/seq_file.h>
27#include <linux/bootmem.h>
28#include <linux/completion.h>
29#include <linux/debugfs.h>
30#include <linux/jiffies.h>
31#include <linux/module.h>
32
33#include <asm/setup.h>
34
35#include <plat/sram.h>
36#include <plat/vram.h>
37#include <plat/dma.h>
38
39#ifdef DEBUG
40#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
41#else
42#define DBG(format, ...)
43#endif
44
45#define OMAP2_SRAM_START 0x40200000
46/* Maximum size; in reality this is smaller if SRAM is partially locked. */
47#define OMAP2_SRAM_SIZE 0xa0000 /* 640k */
48
49/* postponed regions are used to temporarily store region information at boot
50 * time when we cannot yet allocate the region list */
51#define MAX_POSTPONED_REGIONS 10
52
53static bool vram_initialized;
54static int postponed_cnt;
55static struct {
56 unsigned long paddr;
57 size_t size;
58} postponed_regions[MAX_POSTPONED_REGIONS];
59
60struct vram_alloc {
61 struct list_head list;
62 unsigned long paddr;
63 unsigned pages;
64};
65
66struct vram_region {
67 struct list_head list;
68 struct list_head alloc_list;
69 unsigned long paddr;
70 unsigned pages;
71};
72
73static DEFINE_MUTEX(region_mutex);
74static LIST_HEAD(region_list);
75
76static inline int region_mem_type(unsigned long paddr)
77{
78 if (paddr >= OMAP2_SRAM_START &&
79 paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
80 return OMAP_VRAM_MEMTYPE_SRAM;
81 else
82 return OMAP_VRAM_MEMTYPE_SDRAM;
83}
84
85static struct vram_region *omap_vram_create_region(unsigned long paddr,
86 unsigned pages)
87{
88 struct vram_region *rm;
89
90 rm = kzalloc(sizeof(*rm), GFP_KERNEL);
91
92 if (rm) {
93 INIT_LIST_HEAD(&rm->alloc_list);
94 rm->paddr = paddr;
95 rm->pages = pages;
96 }
97
98 return rm;
99}
100
101#if 0
102static void omap_vram_free_region(struct vram_region *vr)
103{
104 list_del(&vr->list);
105 kfree(vr);
106}
107#endif
108
109static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
110 unsigned long paddr, unsigned pages)
111{
112 struct vram_alloc *va;
113 struct vram_alloc *new;
114
115 new = kzalloc(sizeof(*va), GFP_KERNEL);
116
117 if (!new)
118 return NULL;
119
120 new->paddr = paddr;
121 new->pages = pages;
122
123 list_for_each_entry(va, &vr->alloc_list, list) {
124 if (va->paddr > new->paddr)
125 break;
126 }
127
128 list_add_tail(&new->list, &va->list);
129
130 return new;
131}
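
/*
 * A note on the list idiom above: allocations are kept sorted by paddr.
 * When the loop breaks early, list_add_tail() inserts the new entry
 * just before 'va'; when the loop runs to completion, &va->list is the
 * list head itself, so the entry lands at the tail. Either way the
 * ordering invariant holds.
 */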
132
133static void omap_vram_free_allocation(struct vram_alloc *va)
134{
135 list_del(&va->list);
136 kfree(va);
137}
138
139int omap_vram_add_region(unsigned long paddr, size_t size)
140{
141 struct vram_region *rm;
142 unsigned pages;
143
144 if (vram_initialized) {
145 DBG("adding region paddr %08lx size %d\n",
146 paddr, size);
147
148 size &= PAGE_MASK;
149 pages = size >> PAGE_SHIFT;
150
151 rm = omap_vram_create_region(paddr, pages);
152 if (rm == NULL)
153 return -ENOMEM;
154
155 list_add(&rm->list, &region_list);
156 } else {
157 if (postponed_cnt == MAX_POSTPONED_REGIONS)
158 return -ENOMEM;
159
160 postponed_regions[postponed_cnt].paddr = paddr;
161 postponed_regions[postponed_cnt].size = size;
162
163 ++postponed_cnt;
164 }
165 return 0;
166}
167
168int omap_vram_free(unsigned long paddr, size_t size)
169{
170 struct vram_region *rm;
171 struct vram_alloc *alloc;
172 unsigned start, end;
173
174 DBG("free mem paddr %08lx size %d\n", paddr, size);
175
176 size = PAGE_ALIGN(size);
177
178 mutex_lock(&region_mutex);
179
180 list_for_each_entry(rm, &region_list, list) {
181 list_for_each_entry(alloc, &rm->alloc_list, list) {
182			start = alloc->paddr;
183			end = alloc->paddr + (alloc->pages << PAGE_SHIFT);
184
185			if (start >= paddr && end <= paddr + size)
186 goto found;
187 }
188 }
189
190 mutex_unlock(&region_mutex);
191 return -EINVAL;
192
193found:
194 omap_vram_free_allocation(alloc);
195
196 mutex_unlock(&region_mutex);
197 return 0;
198}
199EXPORT_SYMBOL(omap_vram_free);
200
201static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
202{
203 struct vram_region *rm;
204 struct vram_alloc *alloc;
205 size_t size;
206
207 size = pages << PAGE_SHIFT;
208
209 list_for_each_entry(rm, &region_list, list) {
210 unsigned long start, end;
211
212 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
213
214 if (region_mem_type(rm->paddr) != region_mem_type(paddr))
215 continue;
216
217 start = rm->paddr;
218 end = start + (rm->pages << PAGE_SHIFT) - 1;
219 if (start > paddr || end < paddr + size - 1)
220 continue;
221
222 DBG("block ok, checking allocs\n");
223
224 list_for_each_entry(alloc, &rm->alloc_list, list) {
225 end = alloc->paddr - 1;
226
227 if (start <= paddr && end >= paddr + size - 1)
228 goto found;
229
230 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
231 }
232
233 end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;
234
235 if (!(start <= paddr && end >= paddr + size - 1))
236 continue;
237found:
238 DBG("found area start %lx, end %lx\n", start, end);
239
240 if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
241 return -ENOMEM;
242
243 return 0;
244 }
245
246 return -ENOMEM;
247}
248
249int omap_vram_reserve(unsigned long paddr, size_t size)
250{
251 unsigned pages;
252 int r;
253
254 DBG("reserve mem paddr %08lx size %d\n", paddr, size);
255
256 size = PAGE_ALIGN(size);
257 pages = size >> PAGE_SHIFT;
258
259 mutex_lock(&region_mutex);
260
261 r = _omap_vram_reserve(paddr, pages);
262
263 mutex_unlock(&region_mutex);
264
265 return r;
266}
267EXPORT_SYMBOL(omap_vram_reserve);
268
269static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
270{
271 struct completion *compl = data;
272 complete(compl);
273}
274
275static int _omap_vram_clear(u32 paddr, unsigned pages)
276{
277 struct completion compl;
278 unsigned elem_count;
279 unsigned frame_count;
280 int r;
281 int lch;
282
283 init_completion(&compl);
284
285 r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
286 _omap_vram_dma_cb,
287 &compl, &lch);
288 if (r) {
289 pr_err("VRAM: request_dma failed for memory clear\n");
290 return -EBUSY;
291 }
292
293 elem_count = pages * PAGE_SIZE / 4;
294 frame_count = 1;
295
296 omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
297 elem_count, frame_count,
298 OMAP_DMA_SYNC_ELEMENT,
299 0, 0);
300
301 omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
302 paddr, 0, 0);
303
304 omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);
305
306 omap_start_dma(lch);
307
308 if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
309 omap_stop_dma(lch);
310 pr_err("VRAM: dma timeout while clearing memory\n");
311 r = -EIO;
312 goto err;
313 }
314
315 r = 0;
316err:
317 omap_free_dma(lch);
318
319 return r;
320}
321
322static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
323{
324 struct vram_region *rm;
325 struct vram_alloc *alloc;
326
327 list_for_each_entry(rm, &region_list, list) {
328 unsigned long start, end;
329
330 DBG("checking region %lx %d\n", rm->paddr, rm->pages);
331
332 if (region_mem_type(rm->paddr) != mtype)
333 continue;
334
335 start = rm->paddr;
336
337 list_for_each_entry(alloc, &rm->alloc_list, list) {
338 end = alloc->paddr;
339
340 if (end - start >= pages << PAGE_SHIFT)
341 goto found;
342
343 start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
344 }
345
346 end = rm->paddr + (rm->pages << PAGE_SHIFT);
347found:
348 if (end - start < pages << PAGE_SHIFT)
349 continue;
350
351 DBG("found %lx, end %lx\n", start, end);
352
353 alloc = omap_vram_create_allocation(rm, start, pages);
354 if (alloc == NULL)
355 return -ENOMEM;
356
357 *paddr = start;
358
359 _omap_vram_clear(start, pages);
360
361 return 0;
362 }
363
364 return -ENOMEM;
365}
366
367int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
368{
369 unsigned pages;
370 int r;
371
372 BUG_ON(mtype > OMAP_VRAM_MEMTYPE_MAX || !size);
373
374 DBG("alloc mem type %d size %d\n", mtype, size);
375
376 size = PAGE_ALIGN(size);
377 pages = size >> PAGE_SHIFT;
378
379 mutex_lock(&region_mutex);
380
381 r = _omap_vram_alloc(mtype, pages, paddr);
382
383 mutex_unlock(&region_mutex);
384
385 return r;
386}
387EXPORT_SYMBOL(omap_vram_alloc);
388
389void omap_vram_get_info(unsigned long *vram,
390 unsigned long *free_vram,
391 unsigned long *largest_free_block)
392{
393 struct vram_region *vr;
394 struct vram_alloc *va;
395
396 *vram = 0;
397 *free_vram = 0;
398 *largest_free_block = 0;
399
400 mutex_lock(&region_mutex);
401
402 list_for_each_entry(vr, &region_list, list) {
403 unsigned free;
404 unsigned long pa;
405
406 pa = vr->paddr;
407 *vram += vr->pages << PAGE_SHIFT;
408
409 list_for_each_entry(va, &vr->alloc_list, list) {
410 free = va->paddr - pa;
411 *free_vram += free;
412 if (free > *largest_free_block)
413 *largest_free_block = free;
414 pa = va->paddr + (va->pages << PAGE_SHIFT);
415 }
416
417 free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
418 *free_vram += free;
419 if (free > *largest_free_block)
420 *largest_free_block = free;
421 }
422
423 mutex_unlock(&region_mutex);
424}
425EXPORT_SYMBOL(omap_vram_get_info);
426
427#if defined(CONFIG_DEBUG_FS)
428static int vram_debug_show(struct seq_file *s, void *unused)
429{
430 struct vram_region *vr;
431 struct vram_alloc *va;
432 unsigned size;
433
434 mutex_lock(&region_mutex);
435
436 list_for_each_entry(vr, &region_list, list) {
437 size = vr->pages << PAGE_SHIFT;
438 seq_printf(s, "%08lx-%08lx (%d bytes)\n",
439 vr->paddr, vr->paddr + size - 1,
440 size);
441
442 list_for_each_entry(va, &vr->alloc_list, list) {
443 size = va->pages << PAGE_SHIFT;
444 seq_printf(s, " %08lx-%08lx (%d bytes)\n",
445 va->paddr, va->paddr + size - 1,
446 size);
447 }
448 }
449
450 mutex_unlock(&region_mutex);
451
452 return 0;
453}
454
455static int vram_debug_open(struct inode *inode, struct file *file)
456{
457 return single_open(file, vram_debug_show, inode->i_private);
458}
459
460static const struct file_operations vram_debug_fops = {
461 .open = vram_debug_open,
462 .read = seq_read,
463 .llseek = seq_lseek,
464 .release = single_release,
465};
466
467static int __init omap_vram_create_debugfs(void)
468{
469 struct dentry *d;
470
471 d = debugfs_create_file("vram", S_IRUGO, NULL,
472 NULL, &vram_debug_fops);
473 if (IS_ERR(d))
474 return PTR_ERR(d);
475
476 return 0;
477}
478#endif
479
480static __init int omap_vram_init(void)
481{
482 int i;
483
484 vram_initialized = 1;
485
486 for (i = 0; i < postponed_cnt; i++)
487 omap_vram_add_region(postponed_regions[i].paddr,
488 postponed_regions[i].size);
489
490#ifdef CONFIG_DEBUG_FS
491 if (omap_vram_create_debugfs())
492 pr_err("VRAM: Failed to create debugfs file\n");
493#endif
494
495 return 0;
496}
497
498arch_initcall(omap_vram_init);
499
500/* boot-time vram allocation */
501
502/* set from board file */
503static u32 omap_vram_sram_start __initdata;
504static u32 omap_vram_sram_size __initdata;
505
506/* set from board file */
507static u32 omap_vram_sdram_start __initdata;
508static u32 omap_vram_sdram_size __initdata;
509
510/* set from kernel cmdline */
511static u32 omap_vram_def_sdram_size __initdata;
512static u32 omap_vram_def_sdram_start __initdata;
513
514static void __init omap_vram_early_vram(char **p)
515{
516 omap_vram_def_sdram_size = memparse(*p, p);
517 if (**p == ',')
518 omap_vram_def_sdram_start = simple_strtoul((*p) + 1, p, 16);
519}
520__early_param("vram=", omap_vram_early_vram);
521
522/*
523 * Called from map_io. We need to call this early enough that we can
524 * reserve the fixed SDRAM regions before the VM gets hold of them.
525 */
526void __init omap_vram_reserve_sdram(void)
527{
528 struct bootmem_data *bdata;
529 unsigned long sdram_start, sdram_size;
530 u32 paddr;
531 u32 size = 0;
532
533 /* cmdline arg overrides the board file definition */
534 if (omap_vram_def_sdram_size) {
535 size = omap_vram_def_sdram_size;
536 paddr = omap_vram_def_sdram_start;
537 }
538
539 if (!size) {
540 size = omap_vram_sdram_size;
541 paddr = omap_vram_sdram_start;
542 }
543
544#ifdef CONFIG_OMAP2_VRAM_SIZE
545 if (!size) {
546 size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
547 paddr = 0;
548 }
549#endif
550
551 if (!size)
552 return;
553
554 size = PAGE_ALIGN(size);
555
556 bdata = NODE_DATA(0)->bdata;
557 sdram_start = bdata->node_min_pfn << PAGE_SHIFT;
558 sdram_size = (bdata->node_low_pfn << PAGE_SHIFT) - sdram_start;
559
560 if (paddr) {
561 if ((paddr & ~PAGE_MASK) || paddr < sdram_start ||
562 paddr + size > sdram_start + sdram_size) {
563 pr_err("Illegal SDRAM region for VRAM\n");
564 return;
565 }
566
567 if (reserve_bootmem(paddr, size, BOOTMEM_EXCLUSIVE) < 0) {
568 pr_err("FB: failed to reserve VRAM\n");
569 return;
570 }
571 } else {
572 if (size > sdram_size) {
573 pr_err("Illegal SDRAM size for VRAM\n");
574 return;
575 }
576
577 paddr = virt_to_phys(alloc_bootmem_pages(size));
578 BUG_ON(paddr & ~PAGE_MASK);
579 }
580
581 omap_vram_add_region(paddr, size);
582
583	pr_info("Reserving %u bytes of SDRAM for VRAM\n", size);
584}
585
586/*
587 * Called at sram init time, before anything is pushed to the SRAM stack.
588 * Because of the stack scheme, we will allocate everything from the
589 * start of the lowest address region to the end of SRAM. This will also
590 * include padding for page alignment and possible holes between regions.
591 *
592 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
593 * this point, since a driver built as a module would have problems
594 * freeing / reallocating the regions.
595 */
596unsigned long __init omap_vram_reserve_sram(unsigned long sram_pstart,
597 unsigned long sram_vstart,
598 unsigned long sram_size,
599 unsigned long pstart_avail,
600 unsigned long size_avail)
601{
602 unsigned long pend_avail;
603 unsigned long reserved;
604 u32 paddr;
605 u32 size;
606
607 paddr = omap_vram_sram_start;
608 size = omap_vram_sram_size;
609
610 if (!size)
611 return 0;
612
613 reserved = 0;
614 pend_avail = pstart_avail + size_avail;
615
616 if (!paddr) {
617 /* Dynamic allocation */
618 if ((size_avail & PAGE_MASK) < size) {
619 pr_err("Not enough SRAM for VRAM\n");
620 return 0;
621 }
622 size_avail = (size_avail - size) & PAGE_MASK;
623 paddr = pstart_avail + size_avail;
624 }
625
626 if (paddr < sram_pstart ||
627 paddr + size > sram_pstart + sram_size) {
628 pr_err("Illegal SRAM region for VRAM\n");
629 return 0;
630 }
631
632 /* Reserve everything above the start of the region. */
633 if (pend_avail - paddr > reserved)
634 reserved = pend_avail - paddr;
635 size_avail = pend_avail - reserved - pstart_avail;
636
637 omap_vram_add_region(paddr, size);
638
639 if (reserved)
640		pr_info("Reserving %lu bytes of SRAM for VRAM\n", reserved);
641
642 return reserved;
643}
644
645void __init omap_vram_set_sdram_vram(u32 size, u32 start)
646{
647 omap_vram_sdram_start = start;
648 omap_vram_sdram_size = size;
649}
650
651void __init omap_vram_set_sram_vram(u32 size, u32 start)
652{
653 omap_vram_sram_start = start;
654 omap_vram_sram_size = size;
655}
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
new file mode 100644
index 000000000000..fd2271600370
--- /dev/null
+++ b/drivers/video/omap2/vrfb.c
@@ -0,0 +1,315 @@
1/*
2 * VRFB Rotation Engine
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*#define DEBUG*/
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/ioport.h>
26#include <linux/io.h>
27#include <linux/bitops.h>
28#include <linux/mutex.h>
29
30#include <mach/io.h>
31#include <plat/vrfb.h>
32#include <plat/sdrc.h>
33
34#ifdef DEBUG
35#define DBG(format, ...) pr_debug("VRFB: " format, ## __VA_ARGS__)
36#else
37#define DBG(format, ...)
38#endif
39
40#define SMS_ROT_VIRT_BASE(context, rot) \
41 (((context >= 4) ? 0xD0000000 : 0x70000000) \
42 + (0x4000000 * (context)) \
43 + (0x1000000 * (rot)))
44
45#define OMAP_VRFB_SIZE (2048 * 2048 * 4)
46
47#define VRFB_PAGE_WIDTH_EXP	5 /* Assuming SDRAM page size = 1024 */
48#define VRFB_PAGE_HEIGHT_EXP 5 /* 1024 = 2^5 * 2^5 */
49#define VRFB_PAGE_WIDTH (1 << VRFB_PAGE_WIDTH_EXP)
50#define VRFB_PAGE_HEIGHT (1 << VRFB_PAGE_HEIGHT_EXP)
51#define SMS_IMAGEHEIGHT_OFFSET 16
52#define SMS_IMAGEWIDTH_OFFSET 0
53#define SMS_PH_OFFSET 8
54#define SMS_PW_OFFSET 4
55#define SMS_PS_OFFSET 0
56
57#define VRFB_NUM_CTXS 12
58/* bitmap of reserved contexts */
59static unsigned long ctx_map;
60
61static DEFINE_MUTEX(ctx_lock);
62
63/*
64 * Access to this happens from client drivers or the PM core after wake-up.
65 * In the first case we require locking at the driver level; in the second
66 * no locking is needed, since no drivers will run until after the wake-up
67 * has finished.
68 */
69static struct {
70 u32 physical_ba;
71 u32 control;
72 u32 size;
73} vrfb_hw_context[VRFB_NUM_CTXS];
74
75static inline void restore_hw_context(int ctx)
76{
77 omap2_sms_write_rot_control(vrfb_hw_context[ctx].control, ctx);
78 omap2_sms_write_rot_size(vrfb_hw_context[ctx].size, ctx);
79 omap2_sms_write_rot_physical_ba(vrfb_hw_context[ctx].physical_ba, ctx);
80}
81
82static u32 get_image_width_roundup(u16 width, u8 bytespp)
83{
84 unsigned long stride = width * bytespp;
85 unsigned long ceil_pages_per_stride = (stride / VRFB_PAGE_WIDTH) +
86 (stride % VRFB_PAGE_WIDTH != 0);
87
88 return ceil_pages_per_stride * VRFB_PAGE_WIDTH / bytespp;
89}
90
91/*
92 * This is the extra space needed in the VRFB physical area for VRFB to
93 * safely wrap any memory accesses to the invisible part of the virtual
94 * view back to the physical area.
95 */
96static inline u32 get_extra_physical_size(u16 image_width_roundup, u8 bytespp)
97{
98 return (OMAP_VRFB_LINE_LEN - image_width_roundup) * VRFB_PAGE_HEIGHT *
99 bytespp;
100}
101
102void omap_vrfb_restore_context(void)
103{
104 int i;
105 unsigned long map = ctx_map;
106
107 for (i = ffs(map); i; i = ffs(map)) {
108 /* i=1..32 */
109 i--;
110 map &= ~(1 << i);
111 restore_hw_context(i);
112 }
113}
114
115void omap_vrfb_adjust_size(u16 *width, u16 *height,
116 u8 bytespp)
117{
118 *width = ALIGN(*width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
119 *height = ALIGN(*height, VRFB_PAGE_HEIGHT);
120}
121EXPORT_SYMBOL(omap_vrfb_adjust_size);
122
123u32 omap_vrfb_min_phys_size(u16 width, u16 height, u8 bytespp)
124{
125 unsigned long image_width_roundup = get_image_width_roundup(width,
126 bytespp);
127
128 if (image_width_roundup > OMAP_VRFB_LINE_LEN)
129 return 0;
130
131 return (width * height * bytespp) + get_extra_physical_size(
132 image_width_roundup, bytespp);
133}
134EXPORT_SYMBOL(omap_vrfb_min_phys_size);
135
136u16 omap_vrfb_max_height(u32 phys_size, u16 width, u8 bytespp)
137{
138 unsigned long image_width_roundup = get_image_width_roundup(width,
139 bytespp);
140 unsigned long height;
141 unsigned long extra;
142
143 if (image_width_roundup > OMAP_VRFB_LINE_LEN)
144 return 0;
145
146 extra = get_extra_physical_size(image_width_roundup, bytespp);
147
148 if (phys_size < extra)
149 return 0;
150
151 height = (phys_size - extra) / (width * bytespp);
152
153 /* Virtual views provided by VRFB are limited to 2048x2048. */
154 return min_t(unsigned long, height, 2048);
155}
156EXPORT_SYMBOL(omap_vrfb_max_height);
157
158void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
159 u16 width, u16 height,
160 unsigned bytespp, bool yuv_mode)
161{
162 unsigned pixel_size_exp;
163 u16 vrfb_width;
164 u16 vrfb_height;
165 u8 ctx = vrfb->context;
166 u32 size;
167 u32 control;
168
169	DBG("omap_vrfb_setup(%d, %lx, %dx%d, %d, %d)\n", ctx, paddr,
170 width, height, bytespp, yuv_mode);
171
172 /* For YUV2 and UYVY modes VRFB needs to handle pixels a bit
173 * differently. See TRM. */
174 if (yuv_mode) {
175 bytespp *= 2;
176 width /= 2;
177 }
178
179 if (bytespp == 4)
180 pixel_size_exp = 2;
181 else if (bytespp == 2)
182 pixel_size_exp = 1;
183 else
184 BUG();
185
186 vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
187 vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
188
189 DBG("vrfb w %u, h %u bytespp %d\n", vrfb_width, vrfb_height, bytespp);
190
191 size = vrfb_width << SMS_IMAGEWIDTH_OFFSET;
192 size |= vrfb_height << SMS_IMAGEHEIGHT_OFFSET;
193
194 control = pixel_size_exp << SMS_PS_OFFSET;
195 control |= VRFB_PAGE_WIDTH_EXP << SMS_PW_OFFSET;
196 control |= VRFB_PAGE_HEIGHT_EXP << SMS_PH_OFFSET;
197
198 vrfb_hw_context[ctx].physical_ba = paddr;
199 vrfb_hw_context[ctx].size = size;
200 vrfb_hw_context[ctx].control = control;
201
202 omap2_sms_write_rot_physical_ba(paddr, ctx);
203 omap2_sms_write_rot_size(size, ctx);
204 omap2_sms_write_rot_control(control, ctx);
205
206 DBG("vrfb offset pixels %d, %d\n",
207 vrfb_width - width, vrfb_height - height);
208
209 vrfb->xres = width;
210 vrfb->yres = height;
211 vrfb->xoffset = vrfb_width - width;
212 vrfb->yoffset = vrfb_height - height;
213 vrfb->bytespp = bytespp;
214 vrfb->yuv_mode = yuv_mode;
215}
216EXPORT_SYMBOL(omap_vrfb_setup);
217
218int omap_vrfb_map_angle(struct vrfb *vrfb, u16 height, u8 rot)
219{
220 unsigned long size = height * OMAP_VRFB_LINE_LEN * vrfb->bytespp;
221
222 vrfb->vaddr[rot] = ioremap_wc(vrfb->paddr[rot], size);
223
224 if (!vrfb->vaddr[rot]) {
225 printk(KERN_ERR "vrfb: ioremap failed\n");
226 return -ENOMEM;
227 }
228
229 DBG("ioremapped vrfb area %d of size %lu into %p\n", rot, size,
230 vrfb->vaddr[rot]);
231
232 return 0;
233}
234EXPORT_SYMBOL(omap_vrfb_map_angle);
235
236void omap_vrfb_release_ctx(struct vrfb *vrfb)
237{
238 int rot;
239 int ctx = vrfb->context;
240
241 if (ctx == 0xff)
242 return;
243
244 DBG("release ctx %d\n", ctx);
245
246 mutex_lock(&ctx_lock);
247
248 BUG_ON(!(ctx_map & (1 << ctx)));
249
250 clear_bit(ctx, &ctx_map);
251
252 for (rot = 0; rot < 4; ++rot) {
253 if (vrfb->paddr[rot]) {
254 release_mem_region(vrfb->paddr[rot], OMAP_VRFB_SIZE);
255 vrfb->paddr[rot] = 0;
256 }
257 }
258
259 vrfb->context = 0xff;
260
261 mutex_unlock(&ctx_lock);
262}
263EXPORT_SYMBOL(omap_vrfb_release_ctx);
264
265int omap_vrfb_request_ctx(struct vrfb *vrfb)
266{
267 int rot;
268 u32 paddr;
269 u8 ctx;
270 int r;
271
272 DBG("request ctx\n");
273
274 mutex_lock(&ctx_lock);
275
276 for (ctx = 0; ctx < VRFB_NUM_CTXS; ++ctx)
277 if ((ctx_map & (1 << ctx)) == 0)
278 break;
279
280 if (ctx == VRFB_NUM_CTXS) {
281 pr_err("vrfb: no free contexts\n");
282 r = -EBUSY;
283 goto out;
284 }
285
286 DBG("found free ctx %d\n", ctx);
287
288 set_bit(ctx, &ctx_map);
289
290 memset(vrfb, 0, sizeof(*vrfb));
291
292 vrfb->context = ctx;
293
294 for (rot = 0; rot < 4; ++rot) {
295 paddr = SMS_ROT_VIRT_BASE(ctx, rot);
296 if (!request_mem_region(paddr, OMAP_VRFB_SIZE, "vrfb")) {
297 pr_err("vrfb: failed to reserve VRFB "
298 "area for ctx %d, rotation %d\n",
299 ctx, rot * 90);
300 omap_vrfb_release_ctx(vrfb);
301 r = -ENOMEM;
302 goto out;
303 }
304
305 vrfb->paddr[rot] = paddr;
306
307 DBG("VRFB %d/%d: %lx\n", ctx, rot*90, vrfb->paddr[rot]);
308 }
309
310 r = 0;
311out:
312 mutex_unlock(&ctx_lock);
313 return r;
314}
315EXPORT_SYMBOL(omap_vrfb_request_ctx);
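To make the context lifecycle above concrete, here is a hedged sketch of a VRFB client; the geometry values are invented, and the address in the comment simply instantiates the SMS_ROT_VIRT_BASE() macro for context 0, rotation 1.

#include <plat/vrfb.h>

static int example_vrfb_client(unsigned long fb_paddr)
{
	struct vrfb vrfb;
	int r;

	/* Reserve one of the VRFB_NUM_CTXS SMS rotation contexts. */
	r = omap_vrfb_request_ctx(&vrfb);
	if (r)
		return r;

	/* Program an 800x480, 32 bpp RGB view (yuv_mode = false). */
	omap_vrfb_setup(&vrfb, fb_paddr, 800, 480, 4, false);

	/* Map the 90-degree rotated view for CPU access. For ctx 0,
	 * rot 1 the view sits at SMS_ROT_VIRT_BASE(0, 1) =
	 * 0x70000000 + 0x1000000 = 0x71000000, already stored in
	 * vrfb.paddr[1] by the request step. */
	r = omap_vrfb_map_angle(&vrfb, vrfb.yres + vrfb.yoffset, 1);
	if (r) {
		omap_vrfb_release_ctx(&vrfb);
		return r;
	}

	return 0;
}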
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index 91a68e9eb66d..603598f4dbb1 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -25,7 +25,10 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
27#include <linux/mm.h> 27#include <linux/mm.h>
28
28#include <asm/xen/hypervisor.h> 29#include <asm/xen/hypervisor.h>
30
31#include <xen/xen.h>
29#include <xen/events.h> 32#include <xen/events.h>
30#include <xen/page.h> 33#include <xen/page.h>
31#include <xen/interface/io/fbif.h> 34#include <xen/interface/io/fbif.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 420433613584..f6738d8b02bc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -52,6 +52,8 @@
52 52
53#include <asm/xen/hypervisor.h> 53#include <asm/xen/hypervisor.h>
54#include <asm/xen/hypercall.h> 54#include <asm/xen/hypercall.h>
55
56#include <xen/xen.h>
55#include <xen/interface/xen.h> 57#include <xen/interface/xen.h>
56#include <xen/interface/memory.h> 58#include <xen/interface/memory.h>
57#include <xen/xenbus.h> 59#include <xen/xenbus.h>
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 0f765a920189..14e2d995e958 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -1,5 +1,6 @@
1#include <linux/notifier.h> 1#include <linux/notifier.h>
2 2
3#include <xen/xen.h>
3#include <xen/xenbus.h> 4#include <xen/xenbus.h>
4 5
5#include <asm/xen/hypervisor.h> 6#include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 79bedba44fee..f70a4f4698c5 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -48,6 +48,8 @@
48#include <linux/gfp.h> 48#include <linux/gfp.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/cpu.h> 50#include <linux/cpu.h>
51
52#include <xen/xen.h>
51#include <xen/events.h> 53#include <xen/events.h>
52#include <xen/evtchn.h> 54#include <xen/evtchn.h>
53#include <asm/xen/hypervisor.h> 55#include <asm/xen/hypervisor.h>
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7d8f531fb8e8..4c6c0bd636a8 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -37,6 +37,7 @@
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/uaccess.h> 38#include <linux/uaccess.h>
39 39
40#include <xen/xen.h>
40#include <xen/interface/xen.h> 41#include <xen/interface/xen.h>
41#include <xen/page.h> 42#include <xen/page.h>
42#include <xen/grant_table.h> 43#include <xen/grant_table.h>
diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
index 88a60e03ccf0..ae5cb05a1a1c 100644
--- a/drivers/xen/sys-hypervisor.c
+++ b/drivers/xen/sys-hypervisor.c
@@ -14,6 +14,7 @@
14#include <asm/xen/hypervisor.h> 14#include <asm/xen/hypervisor.h>
15#include <asm/xen/hypercall.h> 15#include <asm/xen/hypercall.h>
16 16
17#include <xen/xen.h>
17#include <xen/xenbus.h> 18#include <xen/xenbus.h>
18#include <xen/interface/xen.h> 19#include <xen/interface/xen.h>
19#include <xen/interface/version.h> 20#include <xen/interface/version.h>
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 649fcdf114b7..2f7aaa99dc47 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -49,6 +49,8 @@
49#include <asm/page.h> 49#include <asm/page.h>
50#include <asm/pgtable.h> 50#include <asm/pgtable.h>
51#include <asm/xen/hypervisor.h> 51#include <asm/xen/hypervisor.h>
52
53#include <xen/xen.h>
52#include <xen/xenbus.h> 54#include <xen/xenbus.h>
53#include <xen/events.h> 55#include <xen/events.h>
54#include <xen/page.h> 56#include <xen/page.h>
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 6559e0c752ce..8924d93136f1 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -13,6 +13,8 @@
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/magic.h> 14#include <linux/magic.h>
15 15
16#include <xen/xen.h>
17
16#include "xenfs.h" 18#include "xenfs.h"
17 19
18#include <asm/xen/hypervisor.h> 20#include <asm/xen/hypervisor.h>
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index d69e6ae59251..3f959f1879d8 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -142,29 +142,75 @@ static void nilfs_palloc_desc_block_init(struct inode *inode,
142 } 142 }
143} 143}
144 144
145static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
146 int create,
147 void (*init_block)(struct inode *,
148 struct buffer_head *,
149 void *),
150 struct buffer_head **bhp,
151 struct nilfs_bh_assoc *prev,
152 spinlock_t *lock)
153{
154 int ret;
155
156 spin_lock(lock);
157 if (prev->bh && blkoff == prev->blkoff) {
158 get_bh(prev->bh);
159 *bhp = prev->bh;
160 spin_unlock(lock);
161 return 0;
162 }
163 spin_unlock(lock);
164
165 ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
166 if (!ret) {
167 spin_lock(lock);
168 /*
169		 * The following code must remain safe even if the cache
170		 * contents changed during the get-block call above.
171 */
172 brelse(prev->bh);
173 get_bh(*bhp);
174 prev->bh = *bhp;
175 prev->blkoff = blkoff;
176 spin_unlock(lock);
177 }
178 return ret;
179}
180
145static int nilfs_palloc_get_desc_block(struct inode *inode, 181static int nilfs_palloc_get_desc_block(struct inode *inode,
146 unsigned long group, 182 unsigned long group,
147 int create, struct buffer_head **bhp) 183 int create, struct buffer_head **bhp)
148{ 184{
149 return nilfs_mdt_get_block(inode, 185 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
150 nilfs_palloc_desc_blkoff(inode, group), 186
151 create, nilfs_palloc_desc_block_init, bhp); 187 return nilfs_palloc_get_block(inode,
188 nilfs_palloc_desc_blkoff(inode, group),
189 create, nilfs_palloc_desc_block_init,
190 bhp, &cache->prev_desc, &cache->lock);
152} 191}
153 192
154static int nilfs_palloc_get_bitmap_block(struct inode *inode, 193static int nilfs_palloc_get_bitmap_block(struct inode *inode,
155 unsigned long group, 194 unsigned long group,
156 int create, struct buffer_head **bhp) 195 int create, struct buffer_head **bhp)
157{ 196{
158 return nilfs_mdt_get_block(inode, 197 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
159 nilfs_palloc_bitmap_blkoff(inode, group), 198
160 create, NULL, bhp); 199 return nilfs_palloc_get_block(inode,
200 nilfs_palloc_bitmap_blkoff(inode, group),
201 create, NULL, bhp,
202 &cache->prev_bitmap, &cache->lock);
161} 203}
162 204
163int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, 205int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
164 int create, struct buffer_head **bhp) 206 int create, struct buffer_head **bhp)
165{ 207{
166 return nilfs_mdt_get_block(inode, nilfs_palloc_entry_blkoff(inode, nr), 208 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
167 create, NULL, bhp); 209
210 return nilfs_palloc_get_block(inode,
211 nilfs_palloc_entry_blkoff(inode, nr),
212 create, NULL, bhp,
213 &cache->prev_entry, &cache->lock);
168} 214}
169 215
170static struct nilfs_palloc_group_desc * 216static struct nilfs_palloc_group_desc *
@@ -176,13 +222,6 @@ nilfs_palloc_block_get_group_desc(const struct inode *inode,
176 group % nilfs_palloc_groups_per_desc_block(inode); 222 group % nilfs_palloc_groups_per_desc_block(inode);
177} 223}
178 224
179static unsigned char *
180nilfs_palloc_block_get_bitmap(const struct inode *inode,
181 const struct buffer_head *bh, void *kaddr)
182{
183 return (unsigned char *)(kaddr + bh_offset(bh));
184}
185
186void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr, 225void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr,
187 const struct buffer_head *bh, void *kaddr) 226 const struct buffer_head *bh, void *kaddr)
188{ 227{
@@ -289,8 +328,7 @@ int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
289 if (ret < 0) 328 if (ret < 0)
290 goto out_desc; 329 goto out_desc;
291 bitmap_kaddr = kmap(bitmap_bh->b_page); 330 bitmap_kaddr = kmap(bitmap_bh->b_page);
292 bitmap = nilfs_palloc_block_get_bitmap( 331 bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
293 inode, bitmap_bh, bitmap_kaddr);
294 pos = nilfs_palloc_find_available_slot( 332 pos = nilfs_palloc_find_available_slot(
295 inode, group, group_offset, bitmap, 333 inode, group, group_offset, bitmap,
296 entries_per_group); 334 entries_per_group);
@@ -351,8 +389,7 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
351 desc = nilfs_palloc_block_get_group_desc(inode, group, 389 desc = nilfs_palloc_block_get_group_desc(inode, group,
352 req->pr_desc_bh, desc_kaddr); 390 req->pr_desc_bh, desc_kaddr);
353 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); 391 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
354 bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh, 392 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
355 bitmap_kaddr);
356 393
357 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), 394 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
358 group_offset, bitmap)) 395 group_offset, bitmap))
@@ -385,8 +422,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
385 desc = nilfs_palloc_block_get_group_desc(inode, group, 422 desc = nilfs_palloc_block_get_group_desc(inode, group,
386 req->pr_desc_bh, desc_kaddr); 423 req->pr_desc_bh, desc_kaddr);
387 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); 424 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page);
388 bitmap = nilfs_palloc_block_get_bitmap(inode, req->pr_bitmap_bh, 425 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh);
389 bitmap_kaddr);
390 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group), 426 if (!nilfs_clear_bit_atomic(nilfs_mdt_bgl_lock(inode, group),
391 group_offset, bitmap)) 427 group_offset, bitmap))
392	printk(KERN_WARNING "%s: entry number %llu already freed\n", 428	printk(KERN_WARNING "%s: entry number %llu already freed\n",
@@ -472,8 +508,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
472 desc = nilfs_palloc_block_get_group_desc( 508 desc = nilfs_palloc_block_get_group_desc(
473 inode, group, desc_bh, desc_kaddr); 509 inode, group, desc_bh, desc_kaddr);
474 bitmap_kaddr = kmap(bitmap_bh->b_page); 510 bitmap_kaddr = kmap(bitmap_bh->b_page);
475 bitmap = nilfs_palloc_block_get_bitmap( 511 bitmap = bitmap_kaddr + bh_offset(bitmap_bh);
476 inode, bitmap_bh, bitmap_kaddr);
477 for (j = i, n = 0; 512 for (j = i, n = 0;
478 (j < nitems) && nilfs_palloc_group_is_in(inode, group, 513 (j < nitems) && nilfs_palloc_group_is_in(inode, group,
479 entry_nrs[j]); 514 entry_nrs[j]);
@@ -502,3 +537,30 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
502 } 537 }
503 return 0; 538 return 0;
504} 539}
540
541void nilfs_palloc_setup_cache(struct inode *inode,
542 struct nilfs_palloc_cache *cache)
543{
544 NILFS_MDT(inode)->mi_palloc_cache = cache;
545 spin_lock_init(&cache->lock);
546}
547
548void nilfs_palloc_clear_cache(struct inode *inode)
549{
550 struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;
551
552 spin_lock(&cache->lock);
553 brelse(cache->prev_desc.bh);
554 brelse(cache->prev_bitmap.bh);
555 brelse(cache->prev_entry.bh);
556 cache->prev_desc.bh = NULL;
557 cache->prev_bitmap.bh = NULL;
558 cache->prev_entry.bh = NULL;
559 spin_unlock(&cache->lock);
560}
561
562void nilfs_palloc_destroy_cache(struct inode *inode)
563{
564 nilfs_palloc_clear_cache(inode);
565 NILFS_MDT(inode)->mi_palloc_cache = NULL;
566}
diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h
index 4ace5475c2c7..f4543ac4f560 100644
--- a/fs/nilfs2/alloc.h
+++ b/fs/nilfs2/alloc.h
@@ -69,4 +69,25 @@ int nilfs_palloc_freev(struct inode *, __u64 *, size_t);
69#define nilfs_clear_bit_atomic ext2_clear_bit_atomic 69#define nilfs_clear_bit_atomic ext2_clear_bit_atomic
70#define nilfs_find_next_zero_bit ext2_find_next_zero_bit 70#define nilfs_find_next_zero_bit ext2_find_next_zero_bit
71 71
72/*
73 * persistent object allocator cache
74 */
75
76struct nilfs_bh_assoc {
77 unsigned long blkoff;
78 struct buffer_head *bh;
79};
80
81struct nilfs_palloc_cache {
82 spinlock_t lock;
83 struct nilfs_bh_assoc prev_desc;
84 struct nilfs_bh_assoc prev_bitmap;
85 struct nilfs_bh_assoc prev_entry;
86};
87
88void nilfs_palloc_setup_cache(struct inode *inode,
89 struct nilfs_palloc_cache *cache);
90void nilfs_palloc_clear_cache(struct inode *inode);
91void nilfs_palloc_destroy_cache(struct inode *inode);
92
72#endif /* _NILFS_ALLOC_H */ 93#endif /* _NILFS_ALLOC_H */
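The cache above is meant to be embedded in a metadata file's private info block and wired up when the file is created, which is exactly what the dat.c hunks later in this patch do. A condensed sketch, with an invented struct name:

/* Illustrative embedding, modeled on nilfs_dat_new() further below. */
struct example_mdt_info {
	struct nilfs_mdt_info mi;	/* must remain the first member */
	struct nilfs_palloc_cache palloc_cache;
};

static struct inode *example_mdt_new(struct the_nilfs *nilfs, ino_t ino)
{
	struct inode *inode;
	struct example_mdt_info *info;

	inode = nilfs_mdt_new(nilfs, NULL, ino, sizeof(*info));
	if (inode) {
		info = (struct example_mdt_info *)NILFS_MDT(inode);
		nilfs_palloc_setup_cache(inode, &info->palloc_cache);
	}
	return inode;
}

/* Teardown mirrors this: call nilfs_palloc_destroy_cache(inode) before
 * destroying the mdt inode, so the three cached buffer heads are released. */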
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index 08834df6ec68..f4a14ea2ed9c 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -402,19 +402,11 @@ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
402void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n) 402void nilfs_bmap_add_blocks(const struct nilfs_bmap *bmap, int n)
403{ 403{
404 inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); 404 inode_add_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
405 if (NILFS_MDT(bmap->b_inode))
406 nilfs_mdt_mark_dirty(bmap->b_inode);
407 else
408 mark_inode_dirty(bmap->b_inode);
409} 405}
410 406
411void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n) 407void nilfs_bmap_sub_blocks(const struct nilfs_bmap *bmap, int n)
412{ 408{
413 inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n); 409 inode_sub_bytes(bmap->b_inode, (1 << bmap->b_inode->i_blkbits) * n);
414 if (NILFS_MDT(bmap->b_inode))
415 nilfs_mdt_mark_dirty(bmap->b_inode);
416 else
417 mark_inode_dirty(bmap->b_inode);
418} 410}
419 411
420__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, 412__u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 84c25382f8e3..471e269536ae 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -68,9 +68,34 @@ void nilfs_btnode_cache_clear(struct address_space *btnc)
68 truncate_inode_pages(btnc, 0); 68 truncate_inode_pages(btnc, 0);
69} 69}
70 70
71struct buffer_head *
72nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
73{
74 struct inode *inode = NILFS_BTNC_I(btnc);
75 struct buffer_head *bh;
76
77 bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
78 if (unlikely(!bh))
79 return NULL;
80
81 if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
82 buffer_dirty(bh))) {
83 brelse(bh);
84 BUG();
85 }
86 memset(bh->b_data, 0, 1 << inode->i_blkbits);
87 bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
88 bh->b_blocknr = blocknr;
89 set_buffer_mapped(bh);
90 set_buffer_uptodate(bh);
91
92 unlock_page(bh->b_page);
93 page_cache_release(bh->b_page);
94 return bh;
95}
96
71int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr, 97int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
72 sector_t pblocknr, struct buffer_head **pbh, 98 sector_t pblocknr, struct buffer_head **pbh)
73 int newblk)
74{ 99{
75 struct buffer_head *bh; 100 struct buffer_head *bh;
76 struct inode *inode = NILFS_BTNC_I(btnc); 101 struct inode *inode = NILFS_BTNC_I(btnc);
@@ -81,19 +106,6 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
81 return -ENOMEM; 106 return -ENOMEM;
82 107
83 err = -EEXIST; /* internal code */ 108 err = -EEXIST; /* internal code */
84 if (newblk) {
85 if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
86 buffer_dirty(bh))) {
87 brelse(bh);
88 BUG();
89 }
90 memset(bh->b_data, 0, 1 << inode->i_blkbits);
91 bh->b_bdev = NILFS_I_NILFS(inode)->ns_bdev;
92 bh->b_blocknr = blocknr;
93 set_buffer_mapped(bh);
94 set_buffer_uptodate(bh);
95 goto found;
96 }
97 109
98 if (buffer_uptodate(bh) || buffer_dirty(bh)) 110 if (buffer_uptodate(bh) || buffer_dirty(bh))
99 goto found; 111 goto found;
@@ -135,27 +147,6 @@ out_locked:
135 return err; 147 return err;
136} 148}
137 149
138int nilfs_btnode_get(struct address_space *btnc, __u64 blocknr,
139 sector_t pblocknr, struct buffer_head **pbh, int newblk)
140{
141 struct buffer_head *bh;
142 int err;
143
144 err = nilfs_btnode_submit_block(btnc, blocknr, pblocknr, pbh, newblk);
145 if (err == -EEXIST) /* internal code (cache hit) */
146 return 0;
147 if (unlikely(err))
148 return err;
149
150 bh = *pbh;
151 wait_on_buffer(bh);
152 if (!buffer_uptodate(bh)) {
153 brelse(bh);
154 return -EIO;
155 }
156 return 0;
157}
158
159/** 150/**
160 * nilfs_btnode_delete - delete B-tree node buffer 151 * nilfs_btnode_delete - delete B-tree node buffer
161 * @bh: buffer to be deleted 152 * @bh: buffer to be deleted
@@ -244,12 +235,13 @@ retry:
244 unlock_page(obh->b_page); 235 unlock_page(obh->b_page);
245 } 236 }
246 237
247 err = nilfs_btnode_get(btnc, newkey, 0, &nbh, 1); 238 nbh = nilfs_btnode_create_block(btnc, newkey);
248 if (likely(!err)) { 239 if (!nbh)
249 BUG_ON(nbh == obh); 240 return -ENOMEM;
250 ctxt->newbh = nbh; 241
251 } 242 BUG_ON(nbh == obh);
252 return err; 243 ctxt->newbh = nbh;
244 return 0;
253 245
254 failed_unlock: 246 failed_unlock:
255 unlock_page(obh->b_page); 247 unlock_page(obh->b_page);
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 3e2275172ed6..07da83f07712 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -40,10 +40,10 @@ struct nilfs_btnode_chkey_ctxt {
40void nilfs_btnode_cache_init_once(struct address_space *); 40void nilfs_btnode_cache_init_once(struct address_space *);
41void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); 41void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
42void nilfs_btnode_cache_clear(struct address_space *); 42void nilfs_btnode_cache_clear(struct address_space *);
43struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
44 __u64 blocknr);
43int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, 45int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t,
44 struct buffer_head **, int); 46 struct buffer_head **);
45int nilfs_btnode_get(struct address_space *, __u64, sector_t,
46 struct buffer_head **, int);
47void nilfs_btnode_delete(struct buffer_head *); 47void nilfs_btnode_delete(struct buffer_head *);
48int nilfs_btnode_prepare_change_key(struct address_space *, 48int nilfs_btnode_prepare_change_key(struct address_space *,
49 struct nilfs_btnode_chkey_ctxt *); 49 struct nilfs_btnode_chkey_ctxt *);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index e25b507a474f..7cdd98b8d514 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -114,7 +114,18 @@ static int nilfs_btree_get_block(const struct nilfs_btree *btree, __u64 ptr,
114{ 114{
115 struct address_space *btnc = 115 struct address_space *btnc =
116 &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache; 116 &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
117 return nilfs_btnode_get(btnc, ptr, 0, bhp, 0); 117 int err;
118
119 err = nilfs_btnode_submit_block(btnc, ptr, 0, bhp);
120 if (err)
121 return err == -EEXIST ? 0 : err;
122
123 wait_on_buffer(*bhp);
124 if (!buffer_uptodate(*bhp)) {
125 brelse(*bhp);
126 return -EIO;
127 }
128 return 0;
118} 129}
119 130
120static int nilfs_btree_get_new_block(const struct nilfs_btree *btree, 131static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
@@ -122,12 +133,15 @@ static int nilfs_btree_get_new_block(const struct nilfs_btree *btree,
122{ 133{
123 struct address_space *btnc = 134 struct address_space *btnc =
124 &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache; 135 &NILFS_BMAP_I((struct nilfs_bmap *)btree)->i_btnode_cache;
125 int ret; 136 struct buffer_head *bh;
126 137
127 ret = nilfs_btnode_get(btnc, ptr, 0, bhp, 1); 138 bh = nilfs_btnode_create_block(btnc, ptr);
128 if (!ret) 139 if (!bh)
129 set_buffer_nilfs_volatile(*bhp); 140 return -ENOMEM;
130 return ret; 141
142 set_buffer_nilfs_volatile(bh);
143 *bhp = bh;
144 return 0;
131} 145}
132 146
133static inline int 147static inline int
@@ -444,6 +458,18 @@ nilfs_btree_get_node(const struct nilfs_btree *btree,
444 nilfs_btree_get_nonroot_node(path, level); 458 nilfs_btree_get_nonroot_node(path, level);
445} 459}
446 460
461static inline int
462nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
463{
464 if (unlikely(nilfs_btree_node_get_level(node) != level)) {
465 dump_stack();
466 printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
467 nilfs_btree_node_get_level(node), level);
468 return 1;
469 }
470 return 0;
471}
472
447static int nilfs_btree_do_lookup(const struct nilfs_btree *btree, 473static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
448 struct nilfs_btree_path *path, 474 struct nilfs_btree_path *path,
449 __u64 key, __u64 *ptrp, int minlevel) 475 __u64 key, __u64 *ptrp, int minlevel)
@@ -467,7 +493,8 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
467 if (ret < 0) 493 if (ret < 0)
468 return ret; 494 return ret;
469 node = nilfs_btree_get_nonroot_node(path, level); 495 node = nilfs_btree_get_nonroot_node(path, level);
470 BUG_ON(level != nilfs_btree_node_get_level(node)); 496 if (nilfs_btree_bad_node(node, level))
497 return -EINVAL;
471 if (!found) 498 if (!found)
472 found = nilfs_btree_node_lookup(node, key, &index); 499 found = nilfs_btree_node_lookup(node, key, &index);
473 else 500 else
@@ -512,7 +539,8 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_btree *btree,
512 if (ret < 0) 539 if (ret < 0)
513 return ret; 540 return ret;
514 node = nilfs_btree_get_nonroot_node(path, level); 541 node = nilfs_btree_get_nonroot_node(path, level);
515 BUG_ON(level != nilfs_btree_node_get_level(node)); 542 if (nilfs_btree_bad_node(node, level))
543 return -EINVAL;
516 index = nilfs_btree_node_get_nchildren(node) - 1; 544 index = nilfs_btree_node_get_nchildren(node) - 1;
517 ptr = nilfs_btree_node_get_ptr(btree, node, index); 545 ptr = nilfs_btree_node_get_ptr(btree, node, index);
518 path[level].bp_index = index; 546 path[level].bp_index = index;
@@ -638,13 +666,11 @@ static void nilfs_btree_promote_key(struct nilfs_btree *btree,
638{ 666{
639 if (level < nilfs_btree_height(btree) - 1) { 667 if (level < nilfs_btree_height(btree) - 1) {
640 do { 668 do {
641 lock_buffer(path[level].bp_bh);
642 nilfs_btree_node_set_key( 669 nilfs_btree_node_set_key(
643 nilfs_btree_get_nonroot_node(path, level), 670 nilfs_btree_get_nonroot_node(path, level),
644 path[level].bp_index, key); 671 path[level].bp_index, key);
645 if (!buffer_dirty(path[level].bp_bh)) 672 if (!buffer_dirty(path[level].bp_bh))
646 nilfs_btnode_mark_dirty(path[level].bp_bh); 673 nilfs_btnode_mark_dirty(path[level].bp_bh);
647 unlock_buffer(path[level].bp_bh);
648 } while ((path[level].bp_index == 0) && 674 } while ((path[level].bp_index == 0) &&
649 (++level < nilfs_btree_height(btree) - 1)); 675 (++level < nilfs_btree_height(btree) - 1));
650 } 676 }
@@ -663,13 +689,11 @@ static void nilfs_btree_do_insert(struct nilfs_btree *btree,
663 struct nilfs_btree_node *node; 689 struct nilfs_btree_node *node;
664 690
665 if (level < nilfs_btree_height(btree) - 1) { 691 if (level < nilfs_btree_height(btree) - 1) {
666 lock_buffer(path[level].bp_bh);
667 node = nilfs_btree_get_nonroot_node(path, level); 692 node = nilfs_btree_get_nonroot_node(path, level);
668 nilfs_btree_node_insert(btree, node, *keyp, *ptrp, 693 nilfs_btree_node_insert(btree, node, *keyp, *ptrp,
669 path[level].bp_index); 694 path[level].bp_index);
670 if (!buffer_dirty(path[level].bp_bh)) 695 if (!buffer_dirty(path[level].bp_bh))
671 nilfs_btnode_mark_dirty(path[level].bp_bh); 696 nilfs_btnode_mark_dirty(path[level].bp_bh);
672 unlock_buffer(path[level].bp_bh);
673 697
674 if (path[level].bp_index == 0) 698 if (path[level].bp_index == 0)
675 nilfs_btree_promote_key(btree, path, level + 1, 699 nilfs_btree_promote_key(btree, path, level + 1,
@@ -689,9 +713,6 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
689 struct nilfs_btree_node *node, *left; 713 struct nilfs_btree_node *node, *left;
690 int nchildren, lnchildren, n, move; 714 int nchildren, lnchildren, n, move;
691 715
692 lock_buffer(path[level].bp_bh);
693 lock_buffer(path[level].bp_sib_bh);
694
695 node = nilfs_btree_get_nonroot_node(path, level); 716 node = nilfs_btree_get_nonroot_node(path, level);
696 left = nilfs_btree_get_sib_node(path, level); 717 left = nilfs_btree_get_sib_node(path, level);
697 nchildren = nilfs_btree_node_get_nchildren(node); 718 nchildren = nilfs_btree_node_get_nchildren(node);
@@ -712,9 +733,6 @@ static void nilfs_btree_carry_left(struct nilfs_btree *btree,
712 if (!buffer_dirty(path[level].bp_sib_bh)) 733 if (!buffer_dirty(path[level].bp_sib_bh))
713 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 734 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
714 735
715 unlock_buffer(path[level].bp_bh);
716 unlock_buffer(path[level].bp_sib_bh);
717
718 nilfs_btree_promote_key(btree, path, level + 1, 736 nilfs_btree_promote_key(btree, path, level + 1,
719 nilfs_btree_node_get_key(node, 0)); 737 nilfs_btree_node_get_key(node, 0));
720 738
@@ -740,9 +758,6 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
740 struct nilfs_btree_node *node, *right; 758 struct nilfs_btree_node *node, *right;
741 int nchildren, rnchildren, n, move; 759 int nchildren, rnchildren, n, move;
742 760
743 lock_buffer(path[level].bp_bh);
744 lock_buffer(path[level].bp_sib_bh);
745
746 node = nilfs_btree_get_nonroot_node(path, level); 761 node = nilfs_btree_get_nonroot_node(path, level);
747 right = nilfs_btree_get_sib_node(path, level); 762 right = nilfs_btree_get_sib_node(path, level);
748 nchildren = nilfs_btree_node_get_nchildren(node); 763 nchildren = nilfs_btree_node_get_nchildren(node);
@@ -763,9 +778,6 @@ static void nilfs_btree_carry_right(struct nilfs_btree *btree,
763 if (!buffer_dirty(path[level].bp_sib_bh)) 778 if (!buffer_dirty(path[level].bp_sib_bh))
764 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 779 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
765 780
766 unlock_buffer(path[level].bp_bh);
767 unlock_buffer(path[level].bp_sib_bh);
768
769 path[level + 1].bp_index++; 781 path[level + 1].bp_index++;
770 nilfs_btree_promote_key(btree, path, level + 1, 782 nilfs_btree_promote_key(btree, path, level + 1,
771 nilfs_btree_node_get_key(right, 0)); 783 nilfs_btree_node_get_key(right, 0));
@@ -794,9 +806,6 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
794 __u64 newptr; 806 __u64 newptr;
795 int nchildren, n, move; 807 int nchildren, n, move;
796 808
797 lock_buffer(path[level].bp_bh);
798 lock_buffer(path[level].bp_sib_bh);
799
800 node = nilfs_btree_get_nonroot_node(path, level); 809 node = nilfs_btree_get_nonroot_node(path, level);
801 right = nilfs_btree_get_sib_node(path, level); 810 right = nilfs_btree_get_sib_node(path, level);
802 nchildren = nilfs_btree_node_get_nchildren(node); 811 nchildren = nilfs_btree_node_get_nchildren(node);
@@ -815,9 +824,6 @@ static void nilfs_btree_split(struct nilfs_btree *btree,
815 if (!buffer_dirty(path[level].bp_sib_bh)) 824 if (!buffer_dirty(path[level].bp_sib_bh))
816 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 825 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
817 826
818 unlock_buffer(path[level].bp_bh);
819 unlock_buffer(path[level].bp_sib_bh);
820
821 newkey = nilfs_btree_node_get_key(right, 0); 827 newkey = nilfs_btree_node_get_key(right, 0);
822 newptr = path[level].bp_newreq.bpr_ptr; 828 newptr = path[level].bp_newreq.bpr_ptr;
823 829
@@ -852,8 +858,6 @@ static void nilfs_btree_grow(struct nilfs_btree *btree,
852 struct nilfs_btree_node *root, *child; 858 struct nilfs_btree_node *root, *child;
853 int n; 859 int n;
854 860
855 lock_buffer(path[level].bp_sib_bh);
856
857 root = nilfs_btree_get_root(btree); 861 root = nilfs_btree_get_root(btree);
858 child = nilfs_btree_get_sib_node(path, level); 862 child = nilfs_btree_get_sib_node(path, level);
859 863
@@ -865,8 +869,6 @@ static void nilfs_btree_grow(struct nilfs_btree *btree,
865 if (!buffer_dirty(path[level].bp_sib_bh)) 869 if (!buffer_dirty(path[level].bp_sib_bh))
866 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 870 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
867 871
868 unlock_buffer(path[level].bp_sib_bh);
869
870 path[level].bp_bh = path[level].bp_sib_bh; 872 path[level].bp_bh = path[level].bp_sib_bh;
871 path[level].bp_sib_bh = NULL; 873 path[level].bp_sib_bh = NULL;
872 874
@@ -1023,11 +1025,9 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
1023 1025
1024 stats->bs_nblocks++; 1026 stats->bs_nblocks++;
1025 1027
1026 lock_buffer(bh);
1027 nilfs_btree_node_init(btree, 1028 nilfs_btree_node_init(btree,
1028 (struct nilfs_btree_node *)bh->b_data, 1029 (struct nilfs_btree_node *)bh->b_data,
1029 0, level, 0, NULL, NULL); 1030 0, level, 0, NULL, NULL);
1030 unlock_buffer(bh);
1031 path[level].bp_sib_bh = bh; 1031 path[level].bp_sib_bh = bh;
1032 path[level].bp_op = nilfs_btree_split; 1032 path[level].bp_op = nilfs_btree_split;
1033 } 1033 }
@@ -1052,10 +1052,8 @@ static int nilfs_btree_prepare_insert(struct nilfs_btree *btree,
1052 if (ret < 0) 1052 if (ret < 0)
1053 goto err_out_curr_node; 1053 goto err_out_curr_node;
1054 1054
1055 lock_buffer(bh);
1056 nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data, 1055 nilfs_btree_node_init(btree, (struct nilfs_btree_node *)bh->b_data,
1057 0, level, 0, NULL, NULL); 1056 0, level, 0, NULL, NULL);
1058 unlock_buffer(bh);
1059 path[level].bp_sib_bh = bh; 1057 path[level].bp_sib_bh = bh;
1060 path[level].bp_op = nilfs_btree_grow; 1058 path[level].bp_op = nilfs_btree_grow;
1061 1059
@@ -1154,13 +1152,11 @@ static void nilfs_btree_do_delete(struct nilfs_btree *btree,
1154 struct nilfs_btree_node *node; 1152 struct nilfs_btree_node *node;
1155 1153
1156 if (level < nilfs_btree_height(btree) - 1) { 1154 if (level < nilfs_btree_height(btree) - 1) {
1157 lock_buffer(path[level].bp_bh);
1158 node = nilfs_btree_get_nonroot_node(path, level); 1155 node = nilfs_btree_get_nonroot_node(path, level);
1159 nilfs_btree_node_delete(btree, node, keyp, ptrp, 1156 nilfs_btree_node_delete(btree, node, keyp, ptrp,
1160 path[level].bp_index); 1157 path[level].bp_index);
1161 if (!buffer_dirty(path[level].bp_bh)) 1158 if (!buffer_dirty(path[level].bp_bh))
1162 nilfs_btnode_mark_dirty(path[level].bp_bh); 1159 nilfs_btnode_mark_dirty(path[level].bp_bh);
1163 unlock_buffer(path[level].bp_bh);
1164 if (path[level].bp_index == 0) 1160 if (path[level].bp_index == 0)
1165 nilfs_btree_promote_key(btree, path, level + 1, 1161 nilfs_btree_promote_key(btree, path, level + 1,
1166 nilfs_btree_node_get_key(node, 0)); 1162 nilfs_btree_node_get_key(node, 0));
@@ -1180,9 +1176,6 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
1180 1176
1181 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); 1177 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1182 1178
1183 lock_buffer(path[level].bp_bh);
1184 lock_buffer(path[level].bp_sib_bh);
1185
1186 node = nilfs_btree_get_nonroot_node(path, level); 1179 node = nilfs_btree_get_nonroot_node(path, level);
1187 left = nilfs_btree_get_sib_node(path, level); 1180 left = nilfs_btree_get_sib_node(path, level);
1188 nchildren = nilfs_btree_node_get_nchildren(node); 1181 nchildren = nilfs_btree_node_get_nchildren(node);
@@ -1197,9 +1190,6 @@ static void nilfs_btree_borrow_left(struct nilfs_btree *btree,
1197 if (!buffer_dirty(path[level].bp_sib_bh)) 1190 if (!buffer_dirty(path[level].bp_sib_bh))
1198 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 1191 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
1199 1192
1200 unlock_buffer(path[level].bp_bh);
1201 unlock_buffer(path[level].bp_sib_bh);
1202
1203 nilfs_btree_promote_key(btree, path, level + 1, 1193 nilfs_btree_promote_key(btree, path, level + 1,
1204 nilfs_btree_node_get_key(node, 0)); 1194 nilfs_btree_node_get_key(node, 0));
1205 1195
@@ -1217,9 +1207,6 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
1217 1207
1218 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); 1208 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1219 1209
1220 lock_buffer(path[level].bp_bh);
1221 lock_buffer(path[level].bp_sib_bh);
1222
1223 node = nilfs_btree_get_nonroot_node(path, level); 1210 node = nilfs_btree_get_nonroot_node(path, level);
1224 right = nilfs_btree_get_sib_node(path, level); 1211 right = nilfs_btree_get_sib_node(path, level);
1225 nchildren = nilfs_btree_node_get_nchildren(node); 1212 nchildren = nilfs_btree_node_get_nchildren(node);
@@ -1234,9 +1221,6 @@ static void nilfs_btree_borrow_right(struct nilfs_btree *btree,
1234 if (!buffer_dirty(path[level].bp_sib_bh)) 1221 if (!buffer_dirty(path[level].bp_sib_bh))
1235 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 1222 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
1236 1223
1237 unlock_buffer(path[level].bp_bh);
1238 unlock_buffer(path[level].bp_sib_bh);
1239
1240 path[level + 1].bp_index++; 1224 path[level + 1].bp_index++;
1241 nilfs_btree_promote_key(btree, path, level + 1, 1225 nilfs_btree_promote_key(btree, path, level + 1,
1242 nilfs_btree_node_get_key(right, 0)); 1226 nilfs_btree_node_get_key(right, 0));
@@ -1255,9 +1239,6 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree,
1255 1239
1256 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); 1240 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1257 1241
1258 lock_buffer(path[level].bp_bh);
1259 lock_buffer(path[level].bp_sib_bh);
1260
1261 node = nilfs_btree_get_nonroot_node(path, level); 1242 node = nilfs_btree_get_nonroot_node(path, level);
1262 left = nilfs_btree_get_sib_node(path, level); 1243 left = nilfs_btree_get_sib_node(path, level);
1263 1244
@@ -1268,9 +1249,6 @@ static void nilfs_btree_concat_left(struct nilfs_btree *btree,
1268 if (!buffer_dirty(path[level].bp_sib_bh)) 1249 if (!buffer_dirty(path[level].bp_sib_bh))
1269 nilfs_btnode_mark_dirty(path[level].bp_sib_bh); 1250 nilfs_btnode_mark_dirty(path[level].bp_sib_bh);
1270 1251
1271 unlock_buffer(path[level].bp_bh);
1272 unlock_buffer(path[level].bp_sib_bh);
1273
1274 nilfs_btnode_delete(path[level].bp_bh); 1252 nilfs_btnode_delete(path[level].bp_bh);
1275 path[level].bp_bh = path[level].bp_sib_bh; 1253 path[level].bp_bh = path[level].bp_sib_bh;
1276 path[level].bp_sib_bh = NULL; 1254 path[level].bp_sib_bh = NULL;
@@ -1286,9 +1264,6 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree,
1286 1264
1287 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); 1265 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1288 1266
1289 lock_buffer(path[level].bp_bh);
1290 lock_buffer(path[level].bp_sib_bh);
1291
1292 node = nilfs_btree_get_nonroot_node(path, level); 1267 node = nilfs_btree_get_nonroot_node(path, level);
1293 right = nilfs_btree_get_sib_node(path, level); 1268 right = nilfs_btree_get_sib_node(path, level);
1294 1269
@@ -1299,9 +1274,6 @@ static void nilfs_btree_concat_right(struct nilfs_btree *btree,
1299 if (!buffer_dirty(path[level].bp_bh)) 1274 if (!buffer_dirty(path[level].bp_bh))
1300 nilfs_btnode_mark_dirty(path[level].bp_bh); 1275 nilfs_btnode_mark_dirty(path[level].bp_bh);
1301 1276
1302 unlock_buffer(path[level].bp_bh);
1303 unlock_buffer(path[level].bp_sib_bh);
1304
1305 nilfs_btnode_delete(path[level].bp_sib_bh); 1277 nilfs_btnode_delete(path[level].bp_sib_bh);
1306 path[level].bp_sib_bh = NULL; 1278 path[level].bp_sib_bh = NULL;
1307 path[level + 1].bp_index++; 1279 path[level + 1].bp_index++;
@@ -1316,7 +1288,6 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
1316 1288
1317 nilfs_btree_do_delete(btree, path, level, keyp, ptrp); 1289 nilfs_btree_do_delete(btree, path, level, keyp, ptrp);
1318 1290
1319 lock_buffer(path[level].bp_bh);
1320 root = nilfs_btree_get_root(btree); 1291 root = nilfs_btree_get_root(btree);
1321 child = nilfs_btree_get_nonroot_node(path, level); 1292 child = nilfs_btree_get_nonroot_node(path, level);
1322 1293
@@ -1324,7 +1295,6 @@ static void nilfs_btree_shrink(struct nilfs_btree *btree,
1324 nilfs_btree_node_set_level(root, level); 1295 nilfs_btree_node_set_level(root, level);
1325 n = nilfs_btree_node_get_nchildren(child); 1296 n = nilfs_btree_node_get_nchildren(child);
1326 nilfs_btree_node_move_left(btree, root, child, n); 1297 nilfs_btree_node_move_left(btree, root, child, n);
1327 unlock_buffer(path[level].bp_bh);
1328 1298
1329 nilfs_btnode_delete(path[level].bp_bh); 1299 nilfs_btnode_delete(path[level].bp_bh);
1330 path[level].bp_bh = NULL; 1300 path[level].bp_bh = NULL;
@@ -1699,7 +1669,6 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
1699 nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat); 1669 nilfs_bmap_commit_alloc_ptr(bmap, nreq, dat);
1700 1670
1701 /* create child node at level 1 */ 1671 /* create child node at level 1 */
1702 lock_buffer(bh);
1703 node = (struct nilfs_btree_node *)bh->b_data; 1672 node = (struct nilfs_btree_node *)bh->b_data;
1704 nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs); 1673 nilfs_btree_node_init(btree, node, 0, 1, n, keys, ptrs);
1705 nilfs_btree_node_insert(btree, node, 1674 nilfs_btree_node_insert(btree, node,
@@ -1709,7 +1678,6 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *bmap,
1709 if (!nilfs_bmap_dirty(bmap)) 1678 if (!nilfs_bmap_dirty(bmap))
1710 nilfs_bmap_set_dirty(bmap); 1679 nilfs_bmap_set_dirty(bmap);
1711 1680
1712 unlock_buffer(bh);
1713 brelse(bh); 1681 brelse(bh);
1714 1682
1715 /* create root node at level 2 */ 1683 /* create root node at level 2 */
@@ -2050,7 +2018,7 @@ static void nilfs_btree_lookup_dirty_buffers(struct nilfs_bmap *bmap,
2050 for (level = NILFS_BTREE_LEVEL_NODE_MIN; 2018 for (level = NILFS_BTREE_LEVEL_NODE_MIN;
2051 level < NILFS_BTREE_LEVEL_MAX; 2019 level < NILFS_BTREE_LEVEL_MAX;
2052 level++) 2020 level++)
2053 list_splice(&lists[level], listp->prev); 2021 list_splice_tail(&lists[level], listp);
2054} 2022}
2055 2023
2056static int nilfs_btree_assign_p(struct nilfs_btree *btree, 2024static int nilfs_btree_assign_p(struct nilfs_btree *btree,
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index 0e72bbbc6b64..4b82d84ade75 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -34,28 +34,6 @@ struct nilfs_btree;
34struct nilfs_btree_path; 34struct nilfs_btree_path;
35 35
36/** 36/**
37 * struct nilfs_btree_node - B-tree node
38 * @bn_flags: flags
39 * @bn_level: level
40 * @bn_nchildren: number of children
41 * @bn_pad: padding
42 */
43struct nilfs_btree_node {
44 __u8 bn_flags;
45 __u8 bn_level;
46 __le16 bn_nchildren;
47 __le32 bn_pad;
48};
49
50/* flags */
51#define NILFS_BTREE_NODE_ROOT 0x01
52
53/* level */
54#define NILFS_BTREE_LEVEL_DATA 0
55#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
56#define NILFS_BTREE_LEVEL_MAX 14
57
58/**
59 * struct nilfs_btree - B-tree structure 37 * struct nilfs_btree - B-tree structure
60 * @bt_bmap: bmap base structure 38 * @bt_bmap: bmap base structure
61 */ 39 */
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 3f5d5d06f53c..d5ad54e204a5 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -926,3 +926,29 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
926 up_read(&NILFS_MDT(cpfile)->mi_sem); 926 up_read(&NILFS_MDT(cpfile)->mi_sem);
927 return ret; 927 return ret;
928} 928}
929
930/**
931 * nilfs_cpfile_read - read cpfile inode
932 * @cpfile: cpfile inode
933 * @raw_inode: on-disk cpfile inode
934 */
935int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode)
936{
937 return nilfs_read_inode_common(cpfile, raw_inode);
938}
939
940/**
941 * nilfs_cpfile_new - create cpfile
942 * @nilfs: nilfs object
943 * @cpsize: size of a checkpoint entry
944 */
945struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize)
946{
947 struct inode *cpfile;
948
949 cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO, 0);
950 if (cpfile)
951 nilfs_mdt_set_entry_size(cpfile, cpsize,
952 sizeof(struct nilfs_cpfile_header));
953 return cpfile;
954}
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index debea896e701..bc0809e0ab43 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -40,4 +40,7 @@ int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *);
40ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned, 40ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned,
41 size_t); 41 size_t);
42 42
43int nilfs_cpfile_read(struct inode *cpfile, struct nilfs_inode *raw_inode);
44struct inode *nilfs_cpfile_new(struct the_nilfs *nilfs, size_t cpsize);
45
43#endif /* _NILFS_CPFILE_H */ 46#endif /* _NILFS_CPFILE_H */
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 1ff8e15bd36b..187dd07ba86c 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -33,6 +33,16 @@
33#define NILFS_CNO_MIN ((__u64)1) 33#define NILFS_CNO_MIN ((__u64)1)
34#define NILFS_CNO_MAX (~(__u64)0) 34#define NILFS_CNO_MAX (~(__u64)0)
35 35
36struct nilfs_dat_info {
37 struct nilfs_mdt_info mi;
38 struct nilfs_palloc_cache palloc_cache;
39};
40
41static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat)
42{
43 return (struct nilfs_dat_info *)NILFS_MDT(dat);
44}
45
36static int nilfs_dat_prepare_entry(struct inode *dat, 46static int nilfs_dat_prepare_entry(struct inode *dat,
37 struct nilfs_palloc_req *req, int create) 47 struct nilfs_palloc_req *req, int create)
38{ 48{
@@ -425,3 +435,40 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
425 435
426 return nvi; 436 return nvi;
427} 437}
438
439/**
440 * nilfs_dat_read - read dat inode
441 * @dat: dat inode
442 * @raw_inode: on-disk dat inode
443 */
444int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode)
445{
446 return nilfs_read_inode_common(dat, raw_inode);
447}
448
449/**
450 * nilfs_dat_new - create dat file
451 * @nilfs: nilfs object
452 * @entry_size: size of a dat entry
453 */
454struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size)
455{
456 static struct lock_class_key dat_lock_key;
457 struct inode *dat;
458 struct nilfs_dat_info *di;
459 int err;
460
461 dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO, sizeof(*di));
462 if (dat) {
463 err = nilfs_palloc_init_blockgroup(dat, entry_size);
464 if (unlikely(err)) {
465 nilfs_mdt_destroy(dat);
466 return NULL;
467 }
468
469 di = NILFS_DAT_I(dat);
470 lockdep_set_class(&di->mi.mi_sem, &dat_lock_key);
471 nilfs_palloc_setup_cache(dat, &di->palloc_cache);
472 }
473 return dat;
474}
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index 406070d3ff49..d31c3aab0efe 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -53,4 +53,7 @@ int nilfs_dat_freev(struct inode *, __u64 *, size_t);
53int nilfs_dat_move(struct inode *, __u64, sector_t); 53int nilfs_dat_move(struct inode *, __u64, sector_t);
54ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t); 54ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t);
55 55
56int nilfs_dat_read(struct inode *dat, struct nilfs_inode *raw_inode);
57struct inode *nilfs_dat_new(struct the_nilfs *nilfs, size_t entry_size);
58
56#endif /* _NILFS_DAT_H */ 59#endif /* _NILFS_DAT_H */
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index e097099bfc8f..76d803e060a9 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -99,9 +99,9 @@ static int nilfs_prepare_chunk(struct page *page,
 				  NULL, nilfs_get_block);
 }
 
-static int nilfs_commit_chunk(struct page *page,
+static void nilfs_commit_chunk(struct page *page,
 			       struct address_space *mapping,
 			       unsigned from, unsigned to)
 {
 	struct inode *dir = mapping->host;
 	struct nilfs_sb_info *sbi = NILFS_SB(dir->i_sb);
@@ -112,15 +112,13 @@ static int nilfs_commit_chunk(struct page *page,
 
 	nr_dirty = nilfs_page_count_clean_buffers(page, from, to);
 	copied = block_write_end(NULL, mapping, pos, len, len, page, NULL);
-	if (pos + copied > dir->i_size) {
+	if (pos + copied > dir->i_size)
 		i_size_write(dir, pos + copied);
-		mark_inode_dirty(dir);
-	}
 	if (IS_DIRSYNC(dir))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 	err = nilfs_set_file_dirty(sbi, dir, nr_dirty);
+	WARN_ON(err); /* do not happen */
 	unlock_page(page);
-	return err;
 }
 
126static void nilfs_check_page(struct page *page) 124static void nilfs_check_page(struct page *page)
@@ -455,11 +453,10 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
 	BUG_ON(err);
 	de->inode = cpu_to_le64(inode->i_ino);
 	nilfs_set_de_type(de, inode);
-	err = nilfs_commit_chunk(page, mapping, from, to);
+	nilfs_commit_chunk(page, mapping, from, to);
 	nilfs_put_page(page);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 /* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
-	mark_inode_dirty(dir);
 }
 
 /*
@@ -548,10 +545,10 @@ got_it:
 	memcpy(de->name, name, namelen);
 	de->inode = cpu_to_le64(inode->i_ino);
 	nilfs_set_de_type(de, inode);
-	err = nilfs_commit_chunk(page, page->mapping, from, to);
+	nilfs_commit_chunk(page, page->mapping, from, to);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 /* NILFS_I(dir)->i_flags &= ~NILFS_BTREE_FL; */
-	mark_inode_dirty(dir);
+	nilfs_mark_inode_dirty(dir);
 	/* OFFSET_CACHE */
 out_put:
 	nilfs_put_page(page);
@@ -595,10 +592,9 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
 	if (pde)
 		pde->rec_len = cpu_to_le16(to - from);
 	dir->inode = 0;
-	err = nilfs_commit_chunk(page, mapping, from, to);
+	nilfs_commit_chunk(page, mapping, from, to);
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 /* NILFS_I(inode)->i_flags &= ~NILFS_BTREE_FL; */
-	mark_inode_dirty(inode);
 out:
 	nilfs_put_page(page);
 	return err;
@@ -640,7 +636,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 	memcpy(de->name, "..\0", 4);
 	nilfs_set_de_type(de, inode);
 	kunmap_atomic(kaddr, KM_USER0);
-	err = nilfs_commit_chunk(page, mapping, 0, chunk_size);
+	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
 	page_cache_release(page);
 	return err;
diff --git a/fs/nilfs2/gcdat.c b/fs/nilfs2/gcdat.c
index 93383c5cee90..dd5f7e0a95f6 100644
--- a/fs/nilfs2/gcdat.c
+++ b/fs/nilfs2/gcdat.c
@@ -61,6 +61,8 @@ void nilfs_commit_gcdat_inode(struct the_nilfs *nilfs)
 
 	nilfs_bmap_commit_gcdat(gii->i_bmap, dii->i_bmap);
 
+	nilfs_palloc_clear_cache(dat);
+	nilfs_palloc_clear_cache(gcdat);
 	nilfs_clear_dirty_pages(mapping);
 	nilfs_copy_back_pages(mapping, gmapping);
 	/* note: mdt dirty flags should be cleared by segctor. */
@@ -79,6 +81,7 @@ void nilfs_clear_gcdat_inode(struct the_nilfs *nilfs)
 	gcdat->i_state = I_CLEAR;
 	gii->i_flags = 0;
 
+	nilfs_palloc_clear_cache(gcdat);
 	truncate_inode_pages(gcdat->i_mapping, 0);
 	truncate_inode_pages(&gii->i_btnode_cache, 0);
 }
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index e6de0a27ab5d..e16a6664dfa2 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -149,7 +149,7 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
 				   __u64 vbn, struct buffer_head **out_bh)
 {
 	int ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
-					    vbn ? : pbn, pbn, out_bh, 0);
+					    vbn ? : pbn, pbn, out_bh);
 	if (ret == -EEXIST) /* internal code (cache hit) */
 		ret = 0;
 	return ret;
@@ -212,9 +212,10 @@ void nilfs_destroy_gccache(struct the_nilfs *nilfs)
 static struct inode *alloc_gcinode(struct the_nilfs *nilfs, ino_t ino,
 				   __u64 cno)
 {
-	struct inode *inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS);
+	struct inode *inode;
 	struct nilfs_inode_info *ii;
 
+	inode = nilfs_mdt_new_common(nilfs, NULL, ino, GFP_NOFS, 0);
 	if (!inode)
 		return NULL;
 
@@ -265,7 +266,6 @@ struct inode *nilfs_gc_iget(struct the_nilfs *nilfs, ino_t ino, __u64 cno)
  */
 void nilfs_clear_gcinode(struct inode *inode)
 {
-	nilfs_mdt_clear(inode);
 	nilfs_mdt_destroy(inode);
 }
 
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index de86401f209f..922d9dd42c8f 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -29,6 +29,17 @@
 #include "alloc.h"
 #include "ifile.h"
 
+
+struct nilfs_ifile_info {
+	struct nilfs_mdt_info mi;
+	struct nilfs_palloc_cache palloc_cache;
+};
+
+static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
+{
+	return (struct nilfs_ifile_info *)NILFS_MDT(ifile);
+}
+
 /**
  * nilfs_ifile_create_inode - create a new disk inode
  * @ifile: ifile inode
@@ -148,3 +159,27 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
 	}
 	return err;
 }
+
+/**
+ * nilfs_ifile_new - create inode file
+ * @sbi: nilfs_sb_info struct
+ * @inode_size: size of an inode
+ */
+struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size)
+{
+	struct inode *ifile;
+	int err;
+
+	ifile = nilfs_mdt_new(sbi->s_nilfs, sbi->s_super, NILFS_IFILE_INO,
+			      sizeof(struct nilfs_ifile_info));
+	if (ifile) {
+		err = nilfs_palloc_init_blockgroup(ifile, inode_size);
+		if (unlikely(err)) {
+			nilfs_mdt_destroy(ifile);
+			return NULL;
+		}
+		nilfs_palloc_setup_cache(ifile,
+					 &NILFS_IFILE_I(ifile)->palloc_cache);
+	}
+	return ifile;
+}
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index ecc3ba76db47..cbca32e498f2 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -49,4 +49,6 @@ int nilfs_ifile_create_inode(struct inode *, ino_t *, struct buffer_head **);
 int nilfs_ifile_delete_inode(struct inode *, ino_t);
 int nilfs_ifile_get_inode_block(struct inode *, ino_t, struct buffer_head **);
 
+struct inode *nilfs_ifile_new(struct nilfs_sb_info *sbi, size_t inode_size);
+
 #endif /* _NILFS_IFILE_H */
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 2a0a5a3ac134..7868cc122ac7 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -97,6 +97,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
 		nilfs_transaction_abort(inode->i_sb);
 		goto out;
 	}
+	nilfs_mark_inode_dirty(inode);
 	nilfs_transaction_commit(inode->i_sb); /* never fails */
 	/* Error handling should be detailed */
 	set_buffer_new(bh_result);
@@ -322,7 +323,6 @@ struct inode *nilfs_new_inode(struct inode *dir, int mode)
 	   nilfs_init_acl(), proper cancellation of
 	   above jobs should be considered */
 
-	mark_inode_dirty(inode);
 	return inode;
 
  failed_acl:
@@ -525,7 +525,6 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
 
 	raw_inode = nilfs_ifile_map_inode(sbi->s_ifile, ino, ibh);
 
-	/* The buffer is guarded with lock_buffer() by the caller */
 	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
 		memset(raw_inode, 0, NILFS_MDT(sbi->s_ifile)->mi_entry_size);
 	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);
@@ -599,6 +598,7 @@ void nilfs_truncate(struct inode *inode)
 	if (IS_SYNC(inode))
 		nilfs_set_transaction_flag(NILFS_TI_SYNC);
 
+	nilfs_mark_inode_dirty(inode);
 	nilfs_set_file_dirty(NILFS_SB(sb), inode, 0);
 	nilfs_transaction_commit(sb);
 	/* May construct a logical segment and may fail in sync mode.
@@ -623,6 +623,7 @@ void nilfs_delete_inode(struct inode *inode)
 	truncate_inode_pages(&inode->i_data, 0);
 
 	nilfs_truncate_bmap(ii, 0);
+	nilfs_mark_inode_dirty(inode);
 	nilfs_free_inode(inode);
 	/* nilfs_free_inode() marks inode buffer dirty */
 	if (IS_SYNC(inode))
@@ -745,9 +746,7 @@ int nilfs_mark_inode_dirty(struct inode *inode)
745 "failed to reget inode block.\n"); 746 "failed to reget inode block.\n");
746 return err; 747 return err;
747 } 748 }
748 lock_buffer(ibh);
749 nilfs_update_inode(inode, ibh); 749 nilfs_update_inode(inode, ibh);
750 unlock_buffer(ibh);
751 nilfs_mdt_mark_buffer_dirty(ibh); 750 nilfs_mdt_mark_buffer_dirty(ibh);
752 nilfs_mdt_mark_dirty(sbi->s_ifile); 751 nilfs_mdt_mark_dirty(sbi->s_ifile);
753 brelse(ibh); 752 brelse(ibh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index f6326112d647..06713ffcc7f2 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -186,7 +186,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
 }
 
 static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
-				struct buffer_head **out_bh)
+				int readahead, struct buffer_head **out_bh)
 {
 	struct buffer_head *first_bh, *bh;
 	unsigned long blkoff;
@@ -200,16 +200,18 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
 	if (unlikely(err))
 		goto failed;
 
-	blkoff = block + 1;
-	for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
-		err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
-		if (likely(!err || err == -EEXIST))
-			brelse(bh);
-		else if (err != -EBUSY)
-			break; /* abort readahead if bmap lookup failed */
-
-		if (!buffer_locked(first_bh))
-			goto out_no_wait;
+	if (readahead) {
+		blkoff = block + 1;
+		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
+			err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
+			if (likely(!err || err == -EEXIST))
+				brelse(bh);
+			else if (err != -EBUSY)
+				break;
+				/* abort readahead if bmap lookup failed */
+			if (!buffer_locked(first_bh))
+				goto out_no_wait;
+		}
 	}
 
 	wait_on_buffer(first_bh);
@@ -263,7 +265,7 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
 
 	/* Should be rewritten with merging nilfs_mdt_read_block() */
  retry:
-	ret = nilfs_mdt_read_block(inode, blkoff, out_bh);
+	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
 	if (!create || ret != -ENOENT)
 		return ret;
 
@@ -371,7 +373,7 @@ int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
 	struct buffer_head *bh;
 	int err;
 
-	err = nilfs_mdt_read_block(inode, block, &bh);
+	err = nilfs_mdt_read_block(inode, block, 0, &bh);
 	if (unlikely(err))
 		return err;
 	nilfs_mark_buffer_dirty(bh);
@@ -445,9 +447,17 @@ static const struct file_operations def_mdt_fops;
  * longer than those of the super block structs; they may continue for
  * several consecutive mounts/umounts.  This would need discussions.
  */
+/**
+ * nilfs_mdt_new_common - allocate a pseudo inode for metadata file
+ * @nilfs: nilfs object
+ * @sb: super block instance the metadata file belongs to
+ * @ino: inode number
+ * @gfp_mask: gfp mask for data pages
+ * @objsz: size of the private object attached to inode->i_private
+ */
 struct inode *
 nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
-		     ino_t ino, gfp_t gfp_mask)
+		     ino_t ino, gfp_t gfp_mask, size_t objsz)
 {
 	struct inode *inode = nilfs_alloc_inode_common(nilfs);
 
@@ -455,8 +465,9 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
 		return NULL;
 	else {
 		struct address_space * const mapping = &inode->i_data;
-		struct nilfs_mdt_info *mi = kzalloc(sizeof(*mi), GFP_NOFS);
+		struct nilfs_mdt_info *mi;
 
+		mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
 		if (!mi) {
 			nilfs_destroy_inode(inode);
 			return NULL;
@@ -513,11 +524,11 @@ nilfs_mdt_new_common(struct the_nilfs *nilfs, struct super_block *sb,
 }
 
 struct inode *nilfs_mdt_new(struct the_nilfs *nilfs, struct super_block *sb,
-			    ino_t ino)
+			    ino_t ino, size_t objsz)
 {
-	struct inode *inode = nilfs_mdt_new_common(nilfs, sb, ino,
-						   NILFS_MDT_GFP);
+	struct inode *inode;
 
+	inode = nilfs_mdt_new_common(nilfs, sb, ino, NILFS_MDT_GFP, objsz);
 	if (!inode)
 		return NULL;
 
@@ -544,14 +555,15 @@ void nilfs_mdt_set_shadow(struct inode *orig, struct inode *shadow)
 		&NILFS_I(orig)->i_btnode_cache;
 }
 
-void nilfs_mdt_clear(struct inode *inode)
+static void nilfs_mdt_clear(struct inode *inode)
 {
 	struct nilfs_inode_info *ii = NILFS_I(inode);
 
 	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 	truncate_inode_pages(inode->i_mapping, 0);
 
-	nilfs_bmap_clear(ii->i_bmap);
+	if (test_bit(NILFS_I_BMAP, &ii->i_state))
+		nilfs_bmap_clear(ii->i_bmap);
 	nilfs_btnode_cache_clear(&ii->i_btnode_cache);
 }
 
@@ -559,6 +571,10 @@ void nilfs_mdt_destroy(struct inode *inode)
 {
 	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
 
+	if (mdi->mi_palloc_cache)
+		nilfs_palloc_destroy_cache(inode);
+	nilfs_mdt_clear(inode);
+
 	kfree(mdi->mi_bgl); /* kfree(NULL) is safe */
 	kfree(mdi);
 	nilfs_destroy_inode(inode);
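
Note: the `kzalloc(max(sizeof(*mi), objsz), GFP_NOFS)` in nilfs_mdt_new_common() above is the allocation side of the private-object embedding: it reserves room for the caller's larger per-file struct while the core keeps handling only the base nilfs_mdt_info pointer, and `objsz == 0` still yields a full base struct. A hedged user-space sketch of that sizing rule (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct mdt_info { unsigned long entry_size; };

    /* objsz == 0 means "no private extension"; never under-allocate the base */
    static void *mdt_info_alloc(size_t objsz)
    {
            size_t sz = sizeof(struct mdt_info);

            if (objsz > sz)
                    sz = objsz;
            return calloc(1, sz);             /* zeroed, like kzalloc() */
    }

    int main(void)
    {
            struct mdt_info *plain = mdt_info_alloc(0);
            struct mdt_info *big = mdt_info_alloc(4 * sizeof(struct mdt_info));

            printf("%d %d\n", plain != NULL, big != NULL);
            free(plain);
            free(big);
            return 0;
    }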
diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h
index 431599733c9b..6c4bbb0470fc 100644
--- a/fs/nilfs2/mdt.h
+++ b/fs/nilfs2/mdt.h
@@ -36,6 +36,7 @@
  * @mi_entry_size: size of an entry
  * @mi_first_entry_offset: offset to the first entry
  * @mi_entries_per_block: number of entries in a block
+ * @mi_palloc_cache: persistent object allocator cache
  * @mi_blocks_per_group: number of blocks in a group
  * @mi_blocks_per_desc_block: number of blocks per descriptor block
  */
@@ -46,6 +47,7 @@ struct nilfs_mdt_info {
 	unsigned		mi_entry_size;
 	unsigned		mi_first_entry_offset;
 	unsigned long		mi_entries_per_block;
+	struct nilfs_palloc_cache *mi_palloc_cache;
 	unsigned long		mi_blocks_per_group;
 	unsigned long		mi_blocks_per_desc_block;
 };
@@ -74,11 +76,11 @@ int nilfs_mdt_forget_block(struct inode *, unsigned long);
 int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long);
 int nilfs_mdt_fetch_dirty(struct inode *);
 
-struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t);
+struct inode *nilfs_mdt_new(struct the_nilfs *, struct super_block *, ino_t,
+			    size_t);
 struct inode *nilfs_mdt_new_common(struct the_nilfs *, struct super_block *,
-				   ino_t, gfp_t);
+				   ino_t, gfp_t, size_t);
 void nilfs_mdt_destroy(struct inode *);
-void nilfs_mdt_clear(struct inode *);
 void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned);
 void nilfs_mdt_set_shadow(struct inode *, struct inode *);
 
@@ -104,21 +106,4 @@ static inline __u64 nilfs_mdt_cno(struct inode *inode)
 #define nilfs_mdt_bgl_lock(inode, bg) \
 	(&NILFS_MDT(inode)->mi_bgl->locks[(bg) & (NR_BG_LOCKS-1)].lock)
 
-
-static inline int
-nilfs_mdt_read_inode_direct(struct inode *inode, struct buffer_head *bh,
-			    unsigned n)
-{
-	return nilfs_read_inode_common(
-		inode, (struct nilfs_inode *)(bh->b_data + n));
-}
-
-static inline void
-nilfs_mdt_write_inode_direct(struct inode *inode, struct buffer_head *bh,
-			     unsigned n)
-{
-	nilfs_write_inode_common(
-		inode, (struct nilfs_inode *)(bh->b_data + n), 1);
-}
-
 #endif /* _NILFS_MDT_H */
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index ed02e886fa79..07ba838ef089 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -120,7 +120,7 @@ static int nilfs_create(struct inode *dir, struct dentry *dentry, int mode,
 		inode->i_op = &nilfs_file_inode_operations;
 		inode->i_fop = &nilfs_file_operations;
 		inode->i_mapping->a_ops = &nilfs_aops;
-		mark_inode_dirty(inode);
+		nilfs_mark_inode_dirty(inode);
 		err = nilfs_add_nondir(dentry, inode);
 	}
 	if (!err)
@@ -148,7 +148,7 @@ nilfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
 	err = PTR_ERR(inode);
 	if (!IS_ERR(inode)) {
 		init_special_inode(inode, inode->i_mode, rdev);
-		mark_inode_dirty(inode);
+		nilfs_mark_inode_dirty(inode);
 		err = nilfs_add_nondir(dentry, inode);
 	}
 	if (!err)
@@ -188,7 +188,7 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry,
 		goto out_fail;
 
 	/* mark_inode_dirty(inode); */
-	/* nilfs_new_inode() and page_symlink() do this */
+	/* page_symlink() do this */
 
 	err = nilfs_add_nondir(dentry, inode);
 out:
@@ -200,7 +200,8 @@ out:
 	return err;
 
 out_fail:
-	inode_dec_link_count(inode);
+	drop_nlink(inode);
+	nilfs_mark_inode_dirty(inode);
 	iput(inode);
 	goto out;
 }
@@ -245,7 +246,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	if (err)
 		return err;
 
-	inode_inc_link_count(dir);
+	inc_nlink(dir);
 
 	inode = nilfs_new_inode(dir, S_IFDIR | mode);
 	err = PTR_ERR(inode);
@@ -256,7 +257,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 		inode->i_fop = &nilfs_dir_operations;
 		inode->i_mapping->a_ops = &nilfs_aops;
 
-		inode_inc_link_count(inode);
+		inc_nlink(inode);
 
 		err = nilfs_make_empty(inode, dir);
 		if (err)
@@ -266,6 +267,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	if (err)
 		goto out_fail;
 
+	nilfs_mark_inode_dirty(inode);
 	d_instantiate(dentry, inode);
 out:
 	if (!err)
@@ -276,26 +278,23 @@ out:
 	return err;
 
 out_fail:
-	inode_dec_link_count(inode);
-	inode_dec_link_count(inode);
+	drop_nlink(inode);
+	drop_nlink(inode);
+	nilfs_mark_inode_dirty(inode);
 	iput(inode);
 out_dir:
-	inode_dec_link_count(dir);
+	drop_nlink(dir);
+	nilfs_mark_inode_dirty(dir);
 	goto out;
 }
 
-static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
+static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
 {
 	struct inode *inode;
 	struct nilfs_dir_entry *de;
 	struct page *page;
-	struct nilfs_transaction_info ti;
 	int err;
 
-	err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
-	if (err)
-		return err;
-
 	err = -ENOENT;
 	de = nilfs_find_entry(dir, dentry, &page);
 	if (!de)
@@ -317,12 +316,28 @@ static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
 		goto out;
 
 	inode->i_ctime = dir->i_ctime;
-	inode_dec_link_count(inode);
+	drop_nlink(inode);
 	err = 0;
 out:
-	if (!err)
+	return err;
+}
+
+static int nilfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct nilfs_transaction_info ti;
+	int err;
+
+	err = nilfs_transaction_begin(dir->i_sb, &ti, 0);
+	if (err)
+		return err;
+
+	err = nilfs_do_unlink(dir, dentry);
+
+	if (!err) {
+		nilfs_mark_inode_dirty(dir);
+		nilfs_mark_inode_dirty(dentry->d_inode);
 		err = nilfs_transaction_commit(dir->i_sb);
-	else
+	} else
 		nilfs_transaction_abort(dir->i_sb);
 
 	return err;
@@ -340,11 +355,13 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry)
 
 	err = -ENOTEMPTY;
 	if (nilfs_empty_dir(inode)) {
-		err = nilfs_unlink(dir, dentry);
+		err = nilfs_do_unlink(dir, dentry);
 		if (!err) {
 			inode->i_size = 0;
-			inode_dec_link_count(inode);
-			inode_dec_link_count(dir);
+			drop_nlink(inode);
+			nilfs_mark_inode_dirty(inode);
+			drop_nlink(dir);
+			nilfs_mark_inode_dirty(dir);
 		}
 	}
 	if (!err)
@@ -395,42 +412,48 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		new_de = nilfs_find_entry(new_dir, new_dentry, &new_page);
 		if (!new_de)
 			goto out_dir;
-		inode_inc_link_count(old_inode);
+		inc_nlink(old_inode);
 		nilfs_set_link(new_dir, new_de, new_page, old_inode);
+		nilfs_mark_inode_dirty(new_dir);
 		new_inode->i_ctime = CURRENT_TIME;
 		if (dir_de)
 			drop_nlink(new_inode);
-		inode_dec_link_count(new_inode);
+		drop_nlink(new_inode);
+		nilfs_mark_inode_dirty(new_inode);
 	} else {
 		if (dir_de) {
 			err = -EMLINK;
 			if (new_dir->i_nlink >= NILFS_LINK_MAX)
 				goto out_dir;
 		}
-		inode_inc_link_count(old_inode);
+		inc_nlink(old_inode);
 		err = nilfs_add_link(new_dentry, old_inode);
 		if (err) {
-			inode_dec_link_count(old_inode);
+			drop_nlink(old_inode);
+			nilfs_mark_inode_dirty(old_inode);
 			goto out_dir;
 		}
-		if (dir_de)
-			inode_inc_link_count(new_dir);
+		if (dir_de) {
+			inc_nlink(new_dir);
+			nilfs_mark_inode_dirty(new_dir);
+		}
 	}
 
 	/*
 	 * Like most other Unix systems, set the ctime for inodes on a
 	 * rename.
-	 * inode_dec_link_count() will mark the inode dirty.
 	 */
 	old_inode->i_ctime = CURRENT_TIME;
 
 	nilfs_delete_entry(old_de, old_page);
-	inode_dec_link_count(old_inode);
+	drop_nlink(old_inode);
 
 	if (dir_de) {
 		nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
-		inode_dec_link_count(old_dir);
+		drop_nlink(old_dir);
 	}
+	nilfs_mark_inode_dirty(old_dir);
+	nilfs_mark_inode_dirty(old_inode);
 
 	err = nilfs_transaction_commit(old_dir->i_sb);
 	return err;
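
Note: the switch from inode_inc_link_count()/inode_dec_link_count() to inc_nlink()/drop_nlink() plus explicit nilfs_mark_inode_dirty() throughout namei.c is not cosmetic: the generic helpers dirty the inode through the VFS, whereas nilfs wants the raw inode copied into its ifile buffer within the running transaction. For reference, the generic helpers in <linux/fs.h> of this era expand roughly to the following, so dropping them removes only the mark_inode_dirty() half:

    static inline void inode_inc_link_count(struct inode *inode)
    {
            inc_nlink(inode);
            mark_inode_dirty(inode);
    }

    static inline void inode_dec_link_count(struct inode *inode)
    {
            drop_nlink(inode);
            mark_inode_dirty(inode);
    }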
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index 6dc83591d118..c9c96c7825dc 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -770,14 +770,8 @@ int nilfs_recover_logical_segments(struct the_nilfs *nilfs,
 		nilfs_finish_roll_forward(nilfs, sbi, ri);
 	}
 
-	nilfs_detach_checkpoint(sbi);
-	return 0;
-
  failed:
 	nilfs_detach_checkpoint(sbi);
-	nilfs_mdt_clear(nilfs->ns_cpfile);
-	nilfs_mdt_clear(nilfs->ns_sufile);
-	nilfs_mdt_clear(nilfs->ns_dat);
 	return err;
 }
 
@@ -804,6 +798,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
 	struct nilfs_segsum_info ssi;
 	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
 	sector_t seg_start, seg_end; /* range of full segment (block number) */
+	sector_t b, end;
 	u64 seg_seq;
 	__u64 segnum, nextnum = 0;
 	__u64 cno;
@@ -819,6 +814,11 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
 	/* Calculate range of segment */
 	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
 
+	/* Read ahead segment */
+	b = seg_start;
+	while (b <= seg_end)
+		sb_breadahead(sbi->s_super, b++);
+
 	for (;;) {
 		/* Load segment summary */
 		ret = load_segment_summary(sbi, pseg_start, seg_seq, &ssi, 1);
@@ -841,14 +841,20 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
 		ri->ri_nextnum = nextnum;
 		empty_seg = 0;
 
+		if (!NILFS_SEG_HAS_SR(&ssi) && !scan_newer) {
+			/* This will never happen because a superblock
+			   (last_segment) always points to a pseg
+			   having a super root. */
+			ret = NILFS_SEG_FAIL_CONSISTENCY;
+			goto failed;
+		}
+
+		if (pseg_start == seg_start) {
+			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
+			while (b <= end)
+				sb_breadahead(sbi->s_super, b++);
+		}
 		if (!NILFS_SEG_HAS_SR(&ssi)) {
-			if (!scan_newer) {
-				/* This will never happen because a superblock
-				   (last_segment) always points to a pseg
-				   having a super root. */
-				ret = NILFS_SEG_FAIL_CONSISTENCY;
-				goto failed;
-			}
 			if (!ri->ri_lsegs_start && NILFS_SEG_LOGBGN(&ssi)) {
 				ri->ri_lsegs_start = pseg_start;
 				ri->ri_lsegs_start_seq = seg_seq;
@@ -919,7 +925,7 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi,
 
  super_root_found:
 	/* Updating pointers relating to the latest checkpoint */
-	list_splice(&segments, ri->ri_used_segments.prev);
+	list_splice_tail(&segments, &ri->ri_used_segments);
 	nilfs->ns_last_pseg = sr_pseg_start;
 	nilfs->ns_last_seq = nilfs->ns_seg_seq;
 	nilfs->ns_last_cno = ri->ri_cno;
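
Note: the two sb_breadahead() loops added above issue asynchronous reads for every block of the segment about to be scanned, so the later synchronous load_segment_summary() calls mostly hit the page cache. A rough user-space analogue of the same idea, assuming a 4 KiB block size (posix_fadvise() stands in for sb_breadahead()):

    #include <fcntl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            off_t seg_start = 0, seg_end = 255; /* block range, illustrative */
            off_t blksz = 4096;                 /* assumed block size */
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;
            /* one call covers what the kernel loop requests block by block */
            posix_fadvise(fd, seg_start * blksz,
                          (seg_end - seg_start + 1) * blksz,
                          POSIX_FADV_WILLNEED);
            close(fd);
            return 0;
    }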
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index e6d9e37fa241..645c78656aa0 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -24,10 +24,22 @@
 #include <linux/buffer_head.h>
 #include <linux/writeback.h>
 #include <linux/crc32.h>
+#include <linux/backing-dev.h>
 #include "page.h"
 #include "segbuf.h"
 
 
+struct nilfs_write_info {
+	struct the_nilfs       *nilfs;
+	struct bio	       *bio;
+	int			start, end; /* The region to be submitted */
+	int			rest_blocks;
+	int			max_pages;
+	int			nr_vecs;
+	sector_t		blocknr;
+};
+
+
 static struct kmem_cache *nilfs_segbuf_cachep;
 
 static void nilfs_segbuf_init_once(void *obj)
@@ -63,6 +75,11 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *sb)
 	INIT_LIST_HEAD(&segbuf->sb_list);
 	INIT_LIST_HEAD(&segbuf->sb_segsum_buffers);
 	INIT_LIST_HEAD(&segbuf->sb_payload_buffers);
+
+	init_completion(&segbuf->sb_bio_event);
+	atomic_set(&segbuf->sb_err, 0);
+	segbuf->sb_nbio = 0;
+
 	return segbuf;
 }
 
@@ -83,6 +100,22 @@ void nilfs_segbuf_map(struct nilfs_segment_buffer *segbuf, __u64 segnum,
 		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
 }
 
+/**
+ * nilfs_segbuf_map_cont - map a new log behind a given log
+ * @segbuf: new segment buffer
+ * @prev: segment buffer containing a log to be continued
+ */
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+			   struct nilfs_segment_buffer *prev)
+{
+	segbuf->sb_segnum = prev->sb_segnum;
+	segbuf->sb_fseg_start = prev->sb_fseg_start;
+	segbuf->sb_fseg_end = prev->sb_fseg_end;
+	segbuf->sb_pseg_start = prev->sb_pseg_start + prev->sb_sum.nblocks;
+	segbuf->sb_rest_blocks =
+		segbuf->sb_fseg_end - segbuf->sb_pseg_start + 1;
+}
+
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *segbuf,
 				  __u64 nextnum, struct the_nilfs *nilfs)
 {
@@ -132,8 +165,6 @@ int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags,
 	segbuf->sb_sum.sumbytes = sizeof(struct nilfs_segment_summary);
 	segbuf->sb_sum.nfinfo = segbuf->sb_sum.nfileblk = 0;
 	segbuf->sb_sum.ctime = ctime;
-
-	segbuf->sb_io_error = 0;
 	return 0;
 }
 
@@ -219,7 +250,7 @@ void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
 	raw_sum->ss_datasum = cpu_to_le32(crc);
 }
 
-void nilfs_release_buffers(struct list_head *list)
+static void nilfs_release_buffers(struct list_head *list)
 {
 	struct buffer_head *bh, *n;
 
@@ -241,13 +272,56 @@ void nilfs_release_buffers(struct list_head *list)
 	}
 }
 
+static void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+{
+	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
+	nilfs_release_buffers(&segbuf->sb_payload_buffers);
+}
+
+/*
+ * Iterators for segment buffers
+ */
+void nilfs_clear_logs(struct list_head *logs)
+{
+	struct nilfs_segment_buffer *segbuf;
+
+	list_for_each_entry(segbuf, logs, sb_list)
+		nilfs_segbuf_clear(segbuf);
+}
+
+void nilfs_truncate_logs(struct list_head *logs,
+			 struct nilfs_segment_buffer *last)
+{
+	struct nilfs_segment_buffer *n, *segbuf;
+
+	segbuf = list_prepare_entry(last, logs, sb_list);
+	list_for_each_entry_safe_continue(segbuf, n, logs, sb_list) {
+		list_del_init(&segbuf->sb_list);
+		nilfs_segbuf_clear(segbuf);
+		nilfs_segbuf_free(segbuf);
+	}
+}
+
+int nilfs_wait_on_logs(struct list_head *logs)
+{
+	struct nilfs_segment_buffer *segbuf;
+	int err;
+
+	list_for_each_entry(segbuf, logs, sb_list) {
+		err = nilfs_segbuf_wait(segbuf);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 /*
  * BIO operations
  */
 static void nilfs_end_bio_write(struct bio *bio, int err)
 {
 	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
-	struct nilfs_write_info *wi = bio->bi_private;
+	struct nilfs_segment_buffer *segbuf = bio->bi_private;
 
 	if (err == -EOPNOTSUPP) {
 		set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
@@ -256,21 +330,22 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
 	}
 
 	if (!uptodate)
-		atomic_inc(&wi->err);
+		atomic_inc(&segbuf->sb_err);
 
 	bio_put(bio);
-	complete(&wi->bio_event);
+	complete(&segbuf->sb_bio_event);
 }
 
-static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
+static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
+				   struct nilfs_write_info *wi, int mode)
 {
 	struct bio *bio = wi->bio;
 	int err;
 
-	if (wi->nbio > 0 && bdi_write_congested(wi->bdi)) {
-		wait_for_completion(&wi->bio_event);
-		wi->nbio--;
-		if (unlikely(atomic_read(&wi->err))) {
+	if (segbuf->sb_nbio > 0 && bdi_write_congested(wi->nilfs->ns_bdi)) {
+		wait_for_completion(&segbuf->sb_bio_event);
+		segbuf->sb_nbio--;
+		if (unlikely(atomic_read(&segbuf->sb_err))) {
 			bio_put(bio);
 			err = -EIO;
 			goto failed;
@@ -278,7 +353,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 	}
 
 	bio->bi_end_io = nilfs_end_bio_write;
-	bio->bi_private = wi;
+	bio->bi_private = segbuf;
 	bio_get(bio);
 	submit_bio(mode, bio);
 	if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
@@ -286,7 +361,7 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 		err = -EOPNOTSUPP;
 		goto failed;
 	}
-	wi->nbio++;
+	segbuf->sb_nbio++;
 	bio_put(bio);
 
 	wi->bio = NULL;
@@ -301,17 +376,15 @@ static int nilfs_submit_seg_bio(struct nilfs_write_info *wi, int mode)
 }
 
 /**
- * nilfs_alloc_seg_bio - allocate a bio for writing segment.
- * @sb: super block
- * @start: beginning disk block number of this BIO.
+ * nilfs_alloc_seg_bio - allocate a new bio for writing log
+ * @nilfs: nilfs object
+ * @start: start block number of the bio
  * @nr_vecs: request size of page vector.
  *
- * alloc_seg_bio() allocates a new BIO structure and initialize it.
- *
  * Return Value: On success, pointer to the struct bio is returned.
  * On error, NULL is returned.
  */
-static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
+static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
 				       int nr_vecs)
 {
 	struct bio *bio;
@@ -322,36 +395,33 @@ static struct bio *nilfs_alloc_seg_bio(struct super_block *sb, sector_t start,
 		bio = bio_alloc(GFP_NOIO, nr_vecs);
 	}
 	if (likely(bio)) {
-		bio->bi_bdev = sb->s_bdev;
-		bio->bi_sector = (sector_t)start << (sb->s_blocksize_bits - 9);
+		bio->bi_bdev = nilfs->ns_bdev;
+		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
 	}
 	return bio;
 }
 
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
+static void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *segbuf,
 				       struct nilfs_write_info *wi)
 {
 	wi->bio = NULL;
 	wi->rest_blocks = segbuf->sb_sum.nblocks;
-	wi->max_pages = bio_get_nr_vecs(wi->sb->s_bdev);
+	wi->max_pages = bio_get_nr_vecs(wi->nilfs->ns_bdev);
 	wi->nr_vecs = min(wi->max_pages, wi->rest_blocks);
 	wi->start = wi->end = 0;
-	wi->nbio = 0;
 	wi->blocknr = segbuf->sb_pseg_start;
-
-	atomic_set(&wi->err, 0);
-	init_completion(&wi->bio_event);
 }
 
-static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
-			   int mode)
+static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
+				  struct nilfs_write_info *wi,
+				  struct buffer_head *bh, int mode)
 {
 	int len, err;
 
 	BUG_ON(wi->nr_vecs <= 0);
 repeat:
 	if (!wi->bio) {
-		wi->bio = nilfs_alloc_seg_bio(wi->sb, wi->blocknr + wi->end,
+		wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
 					      wi->nr_vecs);
 		if (unlikely(!wi->bio))
 			return -ENOMEM;
@@ -363,76 +433,83 @@ static int nilfs_submit_bh(struct nilfs_write_info *wi, struct buffer_head *bh,
 		return 0;
 	}
 	/* bio is FULL */
-	err = nilfs_submit_seg_bio(wi, mode);
+	err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
 	/* never submit current bh */
 	if (likely(!err))
 		goto repeat;
 	return err;
 }
 
+/**
+ * nilfs_segbuf_write - submit write requests of a log
+ * @segbuf: buffer storing a log to be written
+ * @nilfs: nilfs object
+ *
+ * Return Value: On Success, 0 is returned. On Error, one of the following
+ * negative error code is returned.
+ *
+ * %-EIO - I/O error
+ *
+ * %-ENOMEM - Insufficient memory available.
+ */
 int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
-		       struct nilfs_write_info *wi)
+		       struct the_nilfs *nilfs)
 {
+	struct nilfs_write_info wi;
 	struct buffer_head *bh;
-	int res, rw = WRITE;
+	int res = 0, rw = WRITE;
+
+	wi.nilfs = nilfs;
+	nilfs_segbuf_prepare_write(segbuf, &wi);
 
 	list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		res = nilfs_submit_bh(wi, bh, rw);
+		res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
 		if (unlikely(res))
 			goto failed_bio;
 	}
 
-	if (wi->bio) {
+	if (wi.bio) {
 		/*
 		 * Last BIO is always sent through the following
 		 * submission.
 		 */
 		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
-		res = nilfs_submit_seg_bio(wi, rw);
-		if (unlikely(res))
-			goto failed_bio;
+		res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
 	}
 
-	res = 0;
- out:
-	return res;
-
  failed_bio:
-	atomic_inc(&wi->err);
-	goto out;
+	return res;
 }
 
 /**
  * nilfs_segbuf_wait - wait for completion of requested BIOs
- * @wi: nilfs_write_info
+ * @segbuf: segment buffer
  *
  * Return Value: On Success, 0 is returned. On Error, one of the following
  * negative error code is returned.
  *
  * %-EIO - I/O error
  */
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf,
-		      struct nilfs_write_info *wi)
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
 {
 	int err = 0;
 
-	if (!wi->nbio)
+	if (!segbuf->sb_nbio)
 		return 0;
 
 	do {
-		wait_for_completion(&wi->bio_event);
-	} while (--wi->nbio > 0);
+		wait_for_completion(&segbuf->sb_bio_event);
+	} while (--segbuf->sb_nbio > 0);
 
-	if (unlikely(atomic_read(&wi->err) > 0)) {
+	if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
 		printk(KERN_ERR "NILFS: IO error writing segment\n");
 		err = -EIO;
-		segbuf->sb_io_error = 1;
 	}
 	return err;
 }
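
Note: the point of this segbuf.c rework is lifetime. The in-flight bio counter, error flag, and completion event used to live in the on-stack nilfs_write_info, so the submitter had to collect all completions before returning; hanging them off the long-lived segment buffer lets nilfs_segbuf_write() return with bios still in flight and nilfs_segbuf_wait() (or nilfs_wait_on_logs()) reap them later. A minimal user-space sketch of the same shape, with a semaphore standing in for the kernel completion (all names illustrative):

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    struct segbuf {
            int nbio;        /* submitted, not yet completed */
            int err;
            sem_t bio_event; /* posted once per finished request */
    };

    static struct segbuf sb;

    static void *end_io(void *arg)          /* like nilfs_end_bio_write() */
    {
            (void)arg;
            sem_post(&sb.bio_event);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[3];
            int i;

            sem_init(&sb.bio_event, 0, 0);
            for (i = 0; i < 3; i++) {       /* "submit" three requests */
                    pthread_create(&t[i], NULL, end_io, NULL);
                    sb.nbio++;
            }
            if (sb.nbio) {                  /* like nilfs_segbuf_wait() */
                    do {
                            sem_wait(&sb.bio_event);
                    } while (--sb.nbio > 0);
            }
            for (i = 0; i < 3; i++)
                    pthread_join(t[i], NULL);
            puts(sb.err ? "EIO" : "ok");
            return 0;
    }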
diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h
index 0c3076f4e592..6af1630fb401 100644
--- a/fs/nilfs2/segbuf.h
+++ b/fs/nilfs2/segbuf.h
@@ -27,7 +27,6 @@
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
 #include <linux/completion.h>
-#include <linux/backing-dev.h>
 
 /**
  * struct nilfs_segsum_info - On-memory segment summary
@@ -77,7 +76,9 @@ struct nilfs_segsum_info {
  * @sb_rest_blocks: Number of residual blocks in the current segment
  * @sb_segsum_buffers: List of buffers for segment summaries
  * @sb_payload_buffers: List of buffers for segment payload
- * @sb_io_error: I/O error status
+ * @sb_nbio: Number of flying bio requests
+ * @sb_err: I/O error status
+ * @sb_bio_event: Completion event of log writing
  */
 struct nilfs_segment_buffer {
 	struct super_block     *sb_super;
@@ -96,7 +97,9 @@ struct nilfs_segment_buffer {
 	struct list_head	sb_payload_buffers; /* including super root */
 
 	/* io status */
-	int			sb_io_error;
+	int			sb_nbio;
+	atomic_t		sb_err;
+	struct completion	sb_bio_event;
 };
 
 #define NILFS_LIST_SEGBUF(head)  \
@@ -125,6 +128,8 @@ struct nilfs_segment_buffer *nilfs_segbuf_new(struct super_block *);
 void nilfs_segbuf_free(struct nilfs_segment_buffer *);
 void nilfs_segbuf_map(struct nilfs_segment_buffer *, __u64, unsigned long,
 		      struct the_nilfs *);
+void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
+			   struct nilfs_segment_buffer *prev);
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
 				  struct the_nilfs *);
 int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t);
@@ -161,41 +166,18 @@ nilfs_segbuf_add_file_buffer(struct nilfs_segment_buffer *segbuf,
 	segbuf->sb_sum.nfileblk++;
 }
 
-void nilfs_release_buffers(struct list_head *);
+int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
+		       struct the_nilfs *nilfs);
+int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf);
 
-static inline void nilfs_segbuf_clear(struct nilfs_segment_buffer *segbuf)
+void nilfs_clear_logs(struct list_head *logs);
+void nilfs_truncate_logs(struct list_head *logs,
+			 struct nilfs_segment_buffer *last);
+int nilfs_wait_on_logs(struct list_head *logs);
+
+static inline void nilfs_destroy_logs(struct list_head *logs)
 {
-	nilfs_release_buffers(&segbuf->sb_segsum_buffers);
-	nilfs_release_buffers(&segbuf->sb_payload_buffers);
+	nilfs_truncate_logs(logs, NULL);
 }
 
-struct nilfs_write_info {
-	struct bio	       *bio;
-	int			start, end; /* The region to be submitted */
-	int			rest_blocks;
-	int			max_pages;
-	int			nr_vecs;
-	sector_t		blocknr;
-
-	int			nbio;
-	atomic_t		err;
-	struct completion	bio_event;
-				/* completion event of segment write */
-
-	/*
-	 * The following fields must be set explicitly
-	 */
-	struct super_block     *sb;
-	struct backing_dev_info *bdi; /* backing dev info */
-	struct buffer_head     *bh_sr;
-};
-
-
-void nilfs_segbuf_prepare_write(struct nilfs_segment_buffer *,
-				struct nilfs_write_info *);
-int nilfs_segbuf_write(struct nilfs_segment_buffer *,
-		       struct nilfs_write_info *);
-int nilfs_segbuf_wait(struct nilfs_segment_buffer *,
-		      struct nilfs_write_info *);
-
 #endif /* _NILFS_SEGBUF_H */
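
Note: one subtlety in the new nilfs_destroy_logs() inline is that nilfs_truncate_logs(logs, NULL) frees the whole list. That works because list_prepare_entry() falls back to the list head when handed a NULL cursor, so the following list_for_each_entry_safe_continue() starts at the first real entry. As found in <linux/list.h> of this era, the macro reads roughly:

    #define list_prepare_entry(pos, head, member) \
            ((pos) ? : list_entry(head, typeof(*pos), member))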
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 6eff66a070d5..17584c524486 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -974,12 +974,12 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
 					   nilfs->ns_nongc_ctime : sci->sc_seg_ctime);
 	raw_sr->sr_flags = 0;
 
-	nilfs_mdt_write_inode_direct(
-		nilfs_dat_inode(nilfs), bh_sr, NILFS_SR_DAT_OFFSET(isz));
-	nilfs_mdt_write_inode_direct(
-		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(isz));
-	nilfs_mdt_write_inode_direct(
-		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(isz));
+	nilfs_write_inode_common(nilfs_dat_inode(nilfs), (void *)raw_sr +
+				 NILFS_SR_DAT_OFFSET(isz), 1);
+	nilfs_write_inode_common(nilfs->ns_cpfile, (void *)raw_sr +
+				 NILFS_SR_CPFILE_OFFSET(isz), 1);
+	nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
+				 NILFS_SR_SUFILE_OFFSET(isz), 1);
 }
 
 static void nilfs_redirty_inodes(struct list_head *head)
@@ -1273,73 +1273,75 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
 	return err;
 }
 
-static int nilfs_touch_segusage(struct inode *sufile, __u64 segnum)
-{
-	struct buffer_head *bh_su;
-	struct nilfs_segment_usage *raw_su;
-	int err;
-
-	err = nilfs_sufile_get_segment_usage(sufile, segnum, &raw_su, &bh_su);
-	if (unlikely(err))
-		return err;
-	nilfs_mdt_mark_buffer_dirty(bh_su);
-	nilfs_mdt_mark_dirty(sufile);
-	nilfs_sufile_put_segment_usage(sufile, segnum, bh_su);
-	return 0;
-}
-
+/**
+ * nilfs_segctor_begin_construction - setup segment buffer to make a new log
+ * @sci: nilfs_sc_info
+ * @nilfs: nilfs object
+ */
 static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
 					    struct the_nilfs *nilfs)
 {
-	struct nilfs_segment_buffer *segbuf, *n;
+	struct nilfs_segment_buffer *segbuf, *prev;
 	__u64 nextnum;
-	int err;
+	int err, alloc = 0;
 
-	if (list_empty(&sci->sc_segbufs)) {
-		segbuf = nilfs_segbuf_new(sci->sc_super);
-		if (unlikely(!segbuf))
-			return -ENOMEM;
-		list_add(&segbuf->sb_list, &sci->sc_segbufs);
-	} else
-		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
+	segbuf = nilfs_segbuf_new(sci->sc_super);
+	if (unlikely(!segbuf))
+		return -ENOMEM;
 
-	nilfs_segbuf_map(segbuf, nilfs->ns_segnum, nilfs->ns_pseg_offset,
-			 nilfs);
-
-	if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
-		nilfs_shift_to_next_segment(nilfs);
-		nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
+	if (list_empty(&sci->sc_write_logs)) {
+		nilfs_segbuf_map(segbuf, nilfs->ns_segnum,
+				 nilfs->ns_pseg_offset, nilfs);
+		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
+			nilfs_shift_to_next_segment(nilfs);
+			nilfs_segbuf_map(segbuf, nilfs->ns_segnum, 0, nilfs);
+		}
+
+		segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
+		nextnum = nilfs->ns_nextnum;
+
+		if (nilfs->ns_segnum == nilfs->ns_nextnum)
+			/* Start from the head of a new full segment */
+			alloc++;
+	} else {
+		/* Continue logs */
+		prev = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
+		nilfs_segbuf_map_cont(segbuf, prev);
+		segbuf->sb_sum.seg_seq = prev->sb_sum.seg_seq;
+		nextnum = prev->sb_nextnum;
+
+		if (segbuf->sb_rest_blocks < NILFS_PSEG_MIN_BLOCKS) {
+			nilfs_segbuf_map(segbuf, prev->sb_nextnum, 0, nilfs);
+			segbuf->sb_sum.seg_seq++;
+			alloc++;
+		}
 	}
-	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
 
-	err = nilfs_touch_segusage(nilfs->ns_sufile, segbuf->sb_segnum);
-	if (unlikely(err))
-		return err;
+	err = nilfs_sufile_mark_dirty(nilfs->ns_sufile, segbuf->sb_segnum);
+	if (err)
+		goto failed;
 
-	if (nilfs->ns_segnum == nilfs->ns_nextnum) {
-		/* Start from the head of a new full segment */
+	if (alloc) {
 		err = nilfs_sufile_alloc(nilfs->ns_sufile, &nextnum);
-		if (unlikely(err))
-			return err;
-	} else
-		nextnum = nilfs->ns_nextnum;
-
-	segbuf->sb_sum.seg_seq = nilfs->ns_seg_seq;
+		if (err)
+			goto failed;
+	}
 	nilfs_segbuf_set_next_segnum(segbuf, nextnum, nilfs);
 
-	/* truncating segment buffers */
-	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
-					  sb_list) {
-		list_del_init(&segbuf->sb_list);
-		nilfs_segbuf_free(segbuf);
-	}
+	BUG_ON(!list_empty(&sci->sc_segbufs));
+	list_add_tail(&segbuf->sb_list, &sci->sc_segbufs);
+	sci->sc_segbuf_nblocks = segbuf->sb_rest_blocks;
 	return 0;
+
+ failed:
+	nilfs_segbuf_free(segbuf);
+	return err;
 }
 
 static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
 					 struct the_nilfs *nilfs, int nadd)
 {
-	struct nilfs_segment_buffer *segbuf, *prev, *n;
+	struct nilfs_segment_buffer *segbuf, *prev;
 	struct inode *sufile = nilfs->ns_sufile;
 	__u64 nextnextnum;
 	LIST_HEAD(list);
@@ -1352,7 +1354,7 @@ static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
 	 * not be dirty. The following call ensures that the buffer is dirty
 	 * and will pin the buffer on memory until the sufile is written.
 	 */
-	err = nilfs_touch_segusage(sufile, prev->sb_nextnum);
+	err = nilfs_sufile_mark_dirty(sufile, prev->sb_nextnum);
 	if (unlikely(err))
 		return err;
 
@@ -1378,33 +1380,33 @@ static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
 		list_add_tail(&segbuf->sb_list, &list);
 		prev = segbuf;
 	}
-	list_splice(&list, sci->sc_segbufs.prev);
+	list_splice_tail(&list, &sci->sc_segbufs);
 	return 0;
 
 failed_segbuf:
 	nilfs_segbuf_free(segbuf);
 failed:
-	list_for_each_entry_safe(segbuf, n, &list, sb_list) {
+	list_for_each_entry(segbuf, &list, sb_list) {
 		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
 		WARN_ON(ret); /* never fails */
-		list_del_init(&segbuf->sb_list);
-		nilfs_segbuf_free(segbuf);
 	}
+	nilfs_destroy_logs(&list);
 	return err;
 }
 
-static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
-						   struct the_nilfs *nilfs)
+static void nilfs_free_incomplete_logs(struct list_head *logs,
+				       struct the_nilfs *nilfs)
 {
-	struct nilfs_segment_buffer *segbuf;
-	int ret, done = 0;
+	struct nilfs_segment_buffer *segbuf, *prev;
+	struct inode *sufile = nilfs->ns_sufile;
+	int ret;
 
-	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
+	segbuf = NILFS_FIRST_SEGBUF(logs);
 	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
-		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
+		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
 		WARN_ON(ret); /* never fails */
 	}
-	if (segbuf->sb_io_error) {
+	if (atomic_read(&segbuf->sb_err)) {
 		/* Case 1: The first segment failed */
 		if (segbuf->sb_pseg_start != segbuf->sb_fseg_start)
 			/* Case 1a: Partial segment appended into an existing
@@ -1413,106 +1415,54 @@ static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
 						segbuf->sb_fseg_end);
 		else /* Case 1b: New full segment */
 			set_nilfs_discontinued(nilfs);
-		done++;
 	}
 
-	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
-		WARN_ON(ret); /* never fails */
-		if (!done && segbuf->sb_io_error) {
-			if (segbuf->sb_segnum != nilfs->ns_nextnum)
-				/* Case 2: extended segment (!= next) failed */
-				nilfs_sufile_set_error(nilfs->ns_sufile,
-						       segbuf->sb_segnum);
-			done++;
-		}
-	}
-}
-
-static void nilfs_segctor_clear_segment_buffers(struct nilfs_sc_info *sci)
-{
-	struct nilfs_segment_buffer *segbuf;
-
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list)
-		nilfs_segbuf_clear(segbuf);
-	sci->sc_super_root = NULL;
-}
-
-static void nilfs_segctor_destroy_segment_buffers(struct nilfs_sc_info *sci)
-{
-	struct nilfs_segment_buffer *segbuf;
-
-	while (!list_empty(&sci->sc_segbufs)) {
-		segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
-		list_del_init(&segbuf->sb_list);
-		nilfs_segbuf_free(segbuf);
-	}
-	/* sci->sc_curseg = NULL; */
-}
-
-static void nilfs_segctor_end_construction(struct nilfs_sc_info *sci,
-					   struct the_nilfs *nilfs, int err)
-{
-	if (unlikely(err)) {
-		nilfs_segctor_free_incomplete_segments(sci, nilfs);
-		if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
-			int ret;
-
-			ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
-							sci->sc_freesegs,
-							sci->sc_nfreesegs,
-							NULL);
-			WARN_ON(ret); /* do not happen */
+	prev = segbuf;
+	list_for_each_entry_continue(segbuf, logs, sb_list) {
+		if (prev->sb_nextnum != segbuf->sb_nextnum) {
+			ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
+			WARN_ON(ret); /* never fails */
 		}
+		if (atomic_read(&segbuf->sb_err) &&
+		    segbuf->sb_segnum != nilfs->ns_nextnum)
+			/* Case 2: extended segment (!= next) failed */
+			nilfs_sufile_set_error(sufile, segbuf->sb_segnum);
+		prev = segbuf;
 	}
-	nilfs_segctor_clear_segment_buffers(sci);
 }
 
 static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
 					  struct inode *sufile)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct buffer_head *bh_su;
-	struct nilfs_segment_usage *raw_su;
 	unsigned long live_blocks;
 	int ret;
 
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
-						     &raw_su, &bh_su);
-		WARN_ON(ret); /* always succeed because bh_su is dirty */
 		live_blocks = segbuf->sb_sum.nblocks +
 			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
-		raw_su->su_lastmod = cpu_to_le64(sci->sc_seg_ctime);
-		raw_su->su_nblocks = cpu_to_le32(live_blocks);
-		nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
-					       bh_su);
+		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+						     live_blocks,
+						     sci->sc_seg_ctime);
+		WARN_ON(ret); /* always succeed because the segusage is dirty */
 	}
 }
 
-static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
-					  struct inode *sufile)
+static void nilfs_cancel_segusage(struct list_head *logs, struct inode *sufile)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct buffer_head *bh_su;
-	struct nilfs_segment_usage *raw_su;
 	int ret;
 
-	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
-	ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
-					     &raw_su, &bh_su);
-	WARN_ON(ret); /* always succeed because bh_su is dirty */
-	raw_su->su_nblocks = cpu_to_le32(segbuf->sb_pseg_start -
-					 segbuf->sb_fseg_start);
-	nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, bh_su);
+	segbuf = NILFS_FIRST_SEGBUF(logs);
+	ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+					     segbuf->sb_pseg_start -
+					     segbuf->sb_fseg_start, 0);
+	WARN_ON(ret); /* always succeed because the segusage is dirty */
 
-	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
-		ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
-						     &raw_su, &bh_su);
+	list_for_each_entry_continue(segbuf, logs, sb_list) {
+		ret = nilfs_sufile_set_segment_usage(sufile, segbuf->sb_segnum,
+						     0, 0);
 		WARN_ON(ret); /* always succeed */
-		raw_su->su_nblocks = 0;
-		nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
-					       bh_su);
 	}
 }
 
@@ -1520,17 +1470,15 @@ static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
 					    struct nilfs_segment_buffer *last,
 					    struct inode *sufile)
 {
-	struct nilfs_segment_buffer *segbuf = last, *n;
+	struct nilfs_segment_buffer *segbuf = last;
 	int ret;
 
-	list_for_each_entry_safe_continue(segbuf, n, &sci->sc_segbufs,
-					  sb_list) {
-		list_del_init(&segbuf->sb_list);
+	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
 		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
 		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
 		WARN_ON(ret);
-		nilfs_segbuf_free(segbuf);
 	}
+	nilfs_truncate_logs(&sci->sc_segbufs, last);
 }
 
 
@@ -1569,7 +1517,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
 							 NULL);
 			WARN_ON(err); /* do not happen */
 		}
-		nilfs_segctor_clear_segment_buffers(sci);
+		nilfs_clear_logs(&sci->sc_segbufs);
 
 		err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
 		if (unlikely(err))
@@ -1814,26 +1762,18 @@ static int nilfs_segctor_prepare_write(struct nilfs_sc_info *sci,
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,
-			       struct backing_dev_info *bdi)
+			       struct the_nilfs *nilfs)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct nilfs_write_info wi;
-	int err, res;
-
-	wi.sb = sci->sc_super;
-	wi.bh_sr = sci->sc_super_root;
-	wi.bdi = bdi;
+	int ret = 0;
 
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		nilfs_segbuf_prepare_write(segbuf, &wi);
-		err = nilfs_segbuf_write(segbuf, &wi);
-
-		res = nilfs_segbuf_wait(segbuf, &wi);
-		err = err ? : res;
-		if (err)
-			return err;
+		ret = nilfs_segbuf_write(segbuf, nilfs);
+		if (ret)
+			break;
 	}
-	return 0;
+	list_splice_tail_init(&sci->sc_segbufs, &sci->sc_write_logs);
+	return ret;
 }
 
 static void __nilfs_end_page_io(struct page *page, int err)
@@ -1911,15 +1851,17 @@ static void nilfs_clear_copied_buffers(struct list_head *list, int err)
 	}
 }
 
-static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
-				      struct page *failed_page, int err)
+static void nilfs_abort_logs(struct list_head *logs, struct page *failed_page,
+			     struct buffer_head *bh_sr, int err)
 {
 	struct nilfs_segment_buffer *segbuf;
 	struct page *bd_page = NULL, *fs_page = NULL;
+	struct buffer_head *bh;
 
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
-		struct buffer_head *bh;
+	if (list_empty(logs))
+		return;
 
+	list_for_each_entry(segbuf, logs, sb_list) {
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
 			if (bh->b_page != bd_page) {
@@ -1931,7 +1873,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
 
 		list_for_each_entry(bh, &segbuf->sb_payload_buffers,
 				    b_assoc_buffers) {
-			if (bh == sci->sc_super_root) {
+			if (bh == bh_sr) {
 				if (bh->b_page != bd_page) {
 					end_page_writeback(bd_page);
 					bd_page = bh->b_page;
@@ -1941,7 +1883,7 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
 			if (bh->b_page != fs_page) {
 				nilfs_end_page_io(fs_page, err);
 				if (fs_page && fs_page == failed_page)
-					goto done;
+					return;
 				fs_page = bh->b_page;
 			}
 		}
@@ -1950,8 +1892,34 @@ static void nilfs_segctor_abort_write(struct nilfs_sc_info *sci,
 		end_page_writeback(bd_page);
 
 	nilfs_end_page_io(fs_page, err);
- done:
+}
+
+static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
+					     struct the_nilfs *nilfs, int err)
+{
+	LIST_HEAD(logs);
+	int ret;
+
+	list_splice_tail_init(&sci->sc_write_logs, &logs);
+	ret = nilfs_wait_on_logs(&logs);
+	if (ret)
+		nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
+
+	list_splice_tail_init(&sci->sc_segbufs, &logs);
+	nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
+	nilfs_free_incomplete_logs(&logs, nilfs);
 	nilfs_clear_copied_buffers(&sci->sc_copied_buffers, err);
+
+	if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
+		ret = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
+						sci->sc_freesegs,
+						sci->sc_nfreesegs,
+						NULL);
+		WARN_ON(ret); /* do not happen */
+	}
+
+	nilfs_destroy_logs(&logs);
+	sci->sc_super_root = NULL;
 }
 
 static void nilfs_set_next_segment(struct the_nilfs *nilfs,
@@ -1973,7 +1941,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 	int update_sr = (sci->sc_super_root != NULL);
 
-	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
+	list_for_each_entry(segbuf, &sci->sc_write_logs, sb_list) {
 		struct buffer_head *bh;
 
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
@@ -2046,7 +2014,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 
 	sci->sc_nblk_inc += sci->sc_nblk_this_inc;
 
-	segbuf = NILFS_LAST_SEGBUF(&sci->sc_segbufs);
+	segbuf = NILFS_LAST_SEGBUF(&sci->sc_write_logs);
 	nilfs_set_next_segment(nilfs, segbuf);
 
 	if (update_sr) {
@@ -2057,10 +2025,23 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
 		clear_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
 		clear_bit(NILFS_SC_DIRTY, &sci->sc_flags);
 		set_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
+		nilfs_segctor_clear_metadata_dirty(sci);
 	} else
 		clear_bit(NILFS_SC_SUPER_ROOT, &sci->sc_flags);
 }
 
+static int nilfs_segctor_wait(struct nilfs_sc_info *sci)
+{
+	int ret;
+
+	ret = nilfs_wait_on_logs(&sci->sc_write_logs);
+	if (!ret) {
+		nilfs_segctor_complete_write(sci);
+		nilfs_destroy_logs(&sci->sc_write_logs);
+	}
+	return ret;
+}
+
 static int nilfs_segctor_check_in_files(struct nilfs_sc_info *sci,
 					struct nilfs_sb_info *sbi)
 {
@@ -2173,7 +2154,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		/* Avoid empty segment */
 		if (sci->sc_stage.scnt == NILFS_ST_DONE &&
 		    NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
-			nilfs_segctor_end_construction(sci, nilfs, 1);
+			nilfs_segctor_abort_construction(sci, nilfs, 1);
 			goto out;
 		}
 
@@ -2187,7 +2168,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		if (has_sr) {
 			err = nilfs_segctor_fill_in_checkpoint(sci);
 			if (unlikely(err))
-				goto failed_to_make_up;
+				goto failed_to_write;
 
 			nilfs_segctor_fill_in_super_root(sci, nilfs);
 		}
@@ -2195,42 +2176,46 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 
 		/* Write partial segments */
 		err = nilfs_segctor_prepare_write(sci, &failed_page);
-		if (unlikely(err))
+		if (err) {
+			nilfs_abort_logs(&sci->sc_segbufs, failed_page,
+					 sci->sc_super_root, err);
 			goto failed_to_write;
-
+		}
 		nilfs_segctor_fill_in_checksums(sci, nilfs->ns_crc_seed);
 
-		err = nilfs_segctor_write(sci, nilfs->ns_bdi);
+		err = nilfs_segctor_write(sci, nilfs);
 		if (unlikely(err))
 			goto failed_to_write;
 
-		nilfs_segctor_complete_write(sci);
-
-		/* Commit segments */
-		if (has_sr)
-			nilfs_segctor_clear_metadata_dirty(sci);
-
-		nilfs_segctor_end_construction(sci, nilfs, 0);
-
+		if (sci->sc_stage.scnt == NILFS_ST_DONE ||
+		    nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) {
+			/*
+			 * At this point, we avoid double buffering
+			 * for blocksize < pagesize because page dirty
+			 * flag is turned off during write and dirty
+			 * buffers are not properly collected for
+			 * pages crossing over segments.
+			 */
+			err = nilfs_segctor_wait(sci);
+			if (err)
+				goto failed_to_write;
+		}
 	} while (sci->sc_stage.scnt != NILFS_ST_DONE);
 
+	sci->sc_super_root = NULL;
+
  out:
-	nilfs_segctor_destroy_segment_buffers(sci);
 	nilfs_segctor_check_out_files(sci, sbi);
 	return err;
 
  failed_to_write:
-	nilfs_segctor_abort_write(sci, failed_page, err);
-	nilfs_segctor_cancel_segusage(sci, nilfs->ns_sufile);
-
- failed_to_make_up:
 	if (sci->sc_stage.flags & NILFS_CF_IFILE_STARTED)
 		nilfs_redirty_inodes(&sci->sc_dirty_files);
 
  failed:
 	if (nilfs_doing_gc())
 		nilfs_redirty_inodes(&sci->sc_gc_inodes);
-	nilfs_segctor_end_construction(sci, nilfs, err);
+	nilfs_segctor_abort_construction(sci, nilfs, err);
 	goto out;
 }
 
@@ -2559,7 +2544,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
 
 	sci->sc_freesegs = kbufs[4];
 	sci->sc_nfreesegs = argv[4].v_nmembs;
-	list_splice_init(&nilfs->ns_gc_inodes, sci->sc_gc_inodes.prev);
+	list_splice_tail_init(&nilfs->ns_gc_inodes, &sci->sc_gc_inodes);
 
 	for (;;) {
 		nilfs_segctor_accept(sci, &req);
@@ -2788,6 +2773,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct nilfs_sb_info *sbi)
 	spin_lock_init(&sci->sc_state_lock);
 	INIT_LIST_HEAD(&sci->sc_dirty_files);
 	INIT_LIST_HEAD(&sci->sc_segbufs);
+	INIT_LIST_HEAD(&sci->sc_write_logs);
 	INIT_LIST_HEAD(&sci->sc_gc_inodes);
 	INIT_LIST_HEAD(&sci->sc_copied_buffers);
 
@@ -2855,6 +2841,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 	}
 
 	WARN_ON(!list_empty(&sci->sc_segbufs));
+	WARN_ON(!list_empty(&sci->sc_write_logs));
 
 	down_write(&sbi->s_nilfs->ns_segctor_sem);
 
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 0d2a475a741b..3d3ab2f9864c 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -97,6 +97,7 @@ struct nilfs_segsum_pointer {
  * @sc_dsync_start: start byte offset of data pages
  * @sc_dsync_end: end byte offset of data pages (inclusive)
  * @sc_segbufs: List of segment buffers
+ * @sc_write_logs: List of segment buffers to hold logs under writing
  * @sc_segbuf_nblocks: Number of available blocks in segment buffers.
  * @sc_curseg: Current segment buffer
  * @sc_super_root: Pointer to the super root buffer
@@ -143,6 +144,7 @@ struct nilfs_sc_info {
 
 	/* Segment buffers */
 	struct list_head	sc_segbufs;
+	struct list_head	sc_write_logs;
 	unsigned long		sc_segbuf_nblocks;
 	struct nilfs_segment_buffer *sc_curseg;
 	struct buffer_head	*sc_super_root;
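
Note on the segment.h hunk above: sc_segbufs now holds logs under construction, while sc_write_logs holds logs whose write-out has already been submitted, so building the next logs can overlap the in-flight writes. A minimal sketch of the intended flow, using only functions introduced in this patch (the pairing shown is illustrative, not part of the patch):

	/* Sketch: nilfs_segctor_write() submits the constructed logs and
	 * splices sc_segbufs onto sc_write_logs; nilfs_segctor_wait() later
	 * waits on sc_write_logs, completes the write, and frees the logs. */
	err = nilfs_segctor_write(sci, nilfs);
	if (!err)
		err = nilfs_segctor_wait(sci);
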
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 37994d4a59cc..b6c36d0cc331 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -31,6 +31,16 @@
 #include "sufile.h"
 
 
+struct nilfs_sufile_info {
+	struct nilfs_mdt_info mi;
+	unsigned long ncleansegs;
+};
+
+static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
+{
+	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
+}
+
 static inline unsigned long
 nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
 {
@@ -62,14 +72,6 @@ nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
 				     max - curr + 1);
 }
 
-static inline struct nilfs_sufile_header *
-nilfs_sufile_block_get_header(const struct inode *sufile,
-			      struct buffer_head *bh,
-			      void *kaddr)
-{
-	return kaddr + bh_offset(bh);
-}
-
 static struct nilfs_segment_usage *
 nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
 				     struct buffer_head *bh, void *kaddr)
@@ -110,6 +112,15 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
 }
 
 /**
+ * nilfs_sufile_get_ncleansegs - return the number of clean segments
+ * @sufile: inode of segment usage file
+ */
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
+{
+	return NILFS_SUI(sufile)->ncleansegs;
+}
+
+/**
  * nilfs_sufile_updatev - modify multiple segment usages at a time
  * @sufile: inode of segment usage file
  * @segnumv: array of segment numbers
@@ -270,7 +281,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	if (ret < 0)
 		goto out_sem;
 	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+	header = kaddr + bh_offset(header_bh);
 	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	last_alloc = le64_to_cpu(header->sh_last_alloc);
 	kunmap_atomic(kaddr, KM_USER0);
@@ -302,13 +313,13 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 			kunmap_atomic(kaddr, KM_USER0);
 
 			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-			header = nilfs_sufile_block_get_header(
-				sufile, header_bh, kaddr);
+			header = kaddr + bh_offset(header_bh);
 			le64_add_cpu(&header->sh_ncleansegs, -1);
 			le64_add_cpu(&header->sh_ndirtysegs, 1);
 			header->sh_last_alloc = cpu_to_le64(segnum);
 			kunmap_atomic(kaddr, KM_USER0);
 
+			NILFS_SUI(sufile)->ncleansegs--;
 			nilfs_mdt_mark_buffer_dirty(header_bh);
 			nilfs_mdt_mark_buffer_dirty(su_bh);
 			nilfs_mdt_mark_dirty(sufile);
@@ -351,6 +362,8 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
 	kunmap_atomic(kaddr, KM_USER0);
 
 	nilfs_sufile_mod_counter(header_bh, -1, 1);
+	NILFS_SUI(sufile)->ncleansegs--;
+
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -380,6 +393,8 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	kunmap_atomic(kaddr, KM_USER0);
 
 	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
+	NILFS_SUI(sufile)->ncleansegs -= clean;
+
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -409,79 +424,65 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 
 	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
+	NILFS_SUI(sufile)->ncleansegs++;
+
 	nilfs_mdt_mark_dirty(sufile);
 }
 
 /**
- * nilfs_sufile_get_segment_usage - get a segment usage
+ * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
  * @sufile: inode of segment usage file
  * @segnum: segment number
- * @sup: pointer to segment usage
- * @bhp: pointer to buffer head
- *
- * Description: nilfs_sufile_get_segment_usage() acquires the segment usage
- * specified by @segnum.
- *
- * Return Value: On success, 0 is returned, and the segment usage and the
- * buffer head of the buffer on which the segment usage is located are stored
- * in the place pointed by @sup and @bhp, respectively. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
  */
-int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
-				   struct nilfs_segment_usage **sup,
-				   struct buffer_head **bhp)
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 {
 	struct buffer_head *bh;
-	struct nilfs_segment_usage *su;
-	void *kaddr;
 	int ret;
 
-	/* segnum is 0 origin */
-	if (segnum >= nilfs_sufile_get_nsegments(sufile))
-		return -EINVAL;
-	down_write(&NILFS_MDT(sufile)->mi_sem);
-	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
-	if (ret < 0)
-		goto out_sem;
-	kaddr = kmap(bh->b_page);
-	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
-	if (nilfs_segment_usage_error(su)) {
-		kunmap(bh->b_page);
+	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+	if (!ret) {
+		nilfs_mdt_mark_buffer_dirty(bh);
+		nilfs_mdt_mark_dirty(sufile);
 		brelse(bh);
-		ret = -EINVAL;
-		goto out_sem;
 	}
-
-	if (sup != NULL)
-		*sup = su;
-	*bhp = bh;
-
- out_sem:
-	up_write(&NILFS_MDT(sufile)->mi_sem);
 	return ret;
 }
 
 /**
- * nilfs_sufile_put_segment_usage - put a segment usage
+ * nilfs_sufile_set_segment_usage - set usage of a segment
  * @sufile: inode of segment usage file
  * @segnum: segment number
- * @bh: buffer head
- *
- * Description: nilfs_sufile_put_segment_usage() releases the segment usage
- * specified by @segnum. @bh must be the buffer head which have been returned
- * by a previous call to nilfs_sufile_get_segment_usage() with @segnum.
+ * @nblocks: number of live blocks in the segment
+ * @modtime: modification time (option)
  */
-void nilfs_sufile_put_segment_usage(struct inode *sufile, __u64 segnum,
-				    struct buffer_head *bh)
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+				   unsigned long nblocks, time_t modtime)
 {
-	kunmap(bh->b_page);
+	struct buffer_head *bh;
+	struct nilfs_segment_usage *su;
+	void *kaddr;
+	int ret;
+
+	down_write(&NILFS_MDT(sufile)->mi_sem);
+	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
+	if (ret < 0)
+		goto out_sem;
+
+	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
+	WARN_ON(nilfs_segment_usage_error(su));
+	if (modtime)
+		su->su_lastmod = cpu_to_le64(modtime);
+	su->su_nblocks = cpu_to_le32(nblocks);
+	kunmap_atomic(kaddr, KM_USER0);
+
+	nilfs_mdt_mark_buffer_dirty(bh);
+	nilfs_mdt_mark_dirty(sufile);
 	brelse(bh);
+
+ out_sem:
+	up_write(&NILFS_MDT(sufile)->mi_sem);
+	return ret;
 }
 
 /**
@@ -515,7 +516,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 		goto out_sem;
 
 	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
-	header = nilfs_sufile_block_get_header(sufile, header_bh, kaddr);
+	header = kaddr + bh_offset(header_bh);
 	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
 	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
@@ -532,33 +533,6 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	return ret;
 }
 
-/**
- * nilfs_sufile_get_ncleansegs - get the number of clean segments
- * @sufile: inode of segment usage file
- * @nsegsp: pointer to the number of clean segments
- *
- * Description: nilfs_sufile_get_ncleansegs() acquires the number of clean
- * segments.
- *
- * Return Value: On success, 0 is returned and the number of clean segments is
- * stored in the place pointed by @nsegsp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- */
-int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
-{
-	struct nilfs_sustat sustat;
-	int ret;
-
-	ret = nilfs_sufile_get_stat(sufile, &sustat);
-	if (ret == 0)
-		*nsegsp = sustat.ss_ncleansegs;
-	return ret;
-}
-
 void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 			       struct buffer_head *header_bh,
 			       struct buffer_head *su_bh)
@@ -577,8 +551,10 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 	nilfs_segment_usage_set_error(su);
 	kunmap_atomic(kaddr, KM_USER0);
 
-	if (suclean)
+	if (suclean) {
 		nilfs_sufile_mod_counter(header_bh, -1, 0);
+		NILFS_SUI(sufile)->ncleansegs--;
+	}
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 }
@@ -657,3 +633,48 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 	up_read(&NILFS_MDT(sufile)->mi_sem);
 	return ret;
 }
+
+/**
+ * nilfs_sufile_read - read sufile inode
+ * @sufile: sufile inode
+ * @raw_inode: on-disk sufile inode
+ */
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode)
+{
+	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
+	struct buffer_head *header_bh;
+	struct nilfs_sufile_header *header;
+	void *kaddr;
+	int ret;
+
+	ret = nilfs_read_inode_common(sufile, raw_inode);
+	if (ret < 0)
+		return ret;
+
+	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
+	if (!ret) {
+		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		header = kaddr + bh_offset(header_bh);
+		sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
+		kunmap_atomic(kaddr, KM_USER0);
+		brelse(header_bh);
+	}
+	return ret;
+}
+
+/**
+ * nilfs_sufile_new - create sufile
+ * @nilfs: nilfs object
+ * @susize: size of a segment usage entry
+ */
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize)
+{
+	struct inode *sufile;
+
+	sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO,
+			       sizeof(struct nilfs_sufile_info));
+	if (sufile)
+		nilfs_mdt_set_entry_size(sufile, susize,
+					 sizeof(struct nilfs_sufile_header));
+	return sufile;
+}
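
Note on the sufile changes above: the exported get/modify/put triple is replaced by single-call updates, and the clean-segment count is now cached in nilfs_sufile_info instead of being re-read from the header block. A hedged sketch of a caller of the new interface (record_usage() is hypothetical; only the nilfs_sufile_* call is from this patch):

	/* Hypothetical caller: set a segment's live block count and mtime in
	 * one call; locking and buffer handling stay inside the sufile. */
	static void record_usage(struct inode *sufile, __u64 segnum,
				 unsigned long live_blocks, time_t ctime)
	{
		int ret = nilfs_sufile_set_segment_usage(sufile, segnum,
							 live_blocks, ctime);
		WARN_ON(ret);	/* the segment usage buffer is already dirty */
	}
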
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 0e99e5c0bd0f..15163b8aff7d 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -34,14 +34,13 @@ static inline unsigned long nilfs_sufile_get_nsegments(struct inode *sufile)
 	return NILFS_MDT(sufile)->mi_nilfs->ns_nsegments;
 }
 
+unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile);
+
 int nilfs_sufile_alloc(struct inode *, __u64 *);
-int nilfs_sufile_get_segment_usage(struct inode *, __u64,
-				   struct nilfs_segment_usage **,
-				   struct buffer_head **);
-void nilfs_sufile_put_segment_usage(struct inode *, __u64,
-				    struct buffer_head *);
+int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
+int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
+				   unsigned long nblocks, time_t modtime);
 int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
-int nilfs_sufile_get_ncleansegs(struct inode *, unsigned long *);
 ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned,
 				size_t);
 
@@ -62,6 +61,9 @@ void nilfs_sufile_do_cancel_free(struct inode *, __u64, struct buffer_head *,
 void nilfs_sufile_do_set_error(struct inode *, __u64, struct buffer_head *,
 			       struct buffer_head *);
 
+int nilfs_sufile_read(struct inode *sufile, struct nilfs_inode *raw_inode);
+struct inode *nilfs_sufile_new(struct the_nilfs *nilfs, size_t susize);
+
 /**
  * nilfs_sufile_scrap - make a segment garbage
  * @sufile: inode of segment usage file
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 644e66727dd0..5403b3ef3a42 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -363,14 +363,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno)
 	list_add(&sbi->s_list, &nilfs->ns_supers);
 	up_write(&nilfs->ns_super_sem);
 
-	sbi->s_ifile = nilfs_mdt_new(nilfs, sbi->s_super, NILFS_IFILE_INO);
+	sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size);
 	if (!sbi->s_ifile)
 		return -ENOMEM;
 
-	err = nilfs_palloc_init_blockgroup(sbi->s_ifile, nilfs->ns_inode_size);
-	if (unlikely(err))
-		goto failed;
-
 	down_read(&nilfs->ns_segctor_sem);
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp,
 					  &bh_cp);
@@ -411,7 +407,6 @@ void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
 {
 	struct the_nilfs *nilfs = sbi->s_nilfs;
 
-	nilfs_mdt_clear(sbi->s_ifile);
 	nilfs_mdt_destroy(sbi->s_ifile);
 	sbi->s_ifile = NULL;
 	down_write(&nilfs->ns_super_sem);
@@ -419,22 +414,6 @@ void nilfs_detach_checkpoint(struct nilfs_sb_info *sbi)
 	up_write(&nilfs->ns_super_sem);
 }
 
-static int nilfs_mark_recovery_complete(struct nilfs_sb_info *sbi)
-{
-	struct the_nilfs *nilfs = sbi->s_nilfs;
-	int err = 0;
-
-	down_write(&nilfs->ns_sem);
-	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
-		nilfs->ns_mount_state |= NILFS_VALID_FS;
-		err = nilfs_commit_super(sbi, 1);
-		if (likely(!err))
-			printk(KERN_INFO "NILFS: recovery complete.\n");
-	}
-	up_write(&nilfs->ns_sem);
-	return err;
-}
-
 static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
 	struct super_block *sb = dentry->d_sb;
@@ -490,7 +469,7 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
 
 	if (!nilfs_test_opt(sbi, BARRIER))
-		seq_printf(seq, ",barrier=off");
+		seq_printf(seq, ",nobarrier");
 	if (nilfs_test_opt(sbi, SNAPSHOT))
 		seq_printf(seq, ",cp=%llu",
 			   (unsigned long long int)sbi->s_snapshot_cno);
@@ -500,6 +479,8 @@ static int nilfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 		seq_printf(seq, ",errors=panic");
 	if (nilfs_test_opt(sbi, STRICT_ORDER))
 		seq_printf(seq, ",order=strict");
+	if (nilfs_test_opt(sbi, NORECOVERY))
+		seq_printf(seq, ",norecovery");
 
 	return 0;
 }
@@ -568,7 +549,7 @@ static const struct export_operations nilfs_export_ops = {
 
 enum {
 	Opt_err_cont, Opt_err_panic, Opt_err_ro,
-	Opt_barrier, Opt_snapshot, Opt_order,
+	Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
 	Opt_err,
 };
 
@@ -576,25 +557,13 @@ static match_table_t tokens = {
 	{Opt_err_cont, "errors=continue"},
 	{Opt_err_panic, "errors=panic"},
 	{Opt_err_ro, "errors=remount-ro"},
-	{Opt_barrier, "barrier=%s"},
+	{Opt_nobarrier, "nobarrier"},
 	{Opt_snapshot, "cp=%u"},
 	{Opt_order, "order=%s"},
+	{Opt_norecovery, "norecovery"},
 	{Opt_err, NULL}
 };
 
-static int match_bool(substring_t *s, int *result)
-{
-	int len = s->to - s->from;
-
-	if (strncmp(s->from, "on", len) == 0)
-		*result = 1;
-	else if (strncmp(s->from, "off", len) == 0)
-		*result = 0;
-	else
-		return 1;
-	return 0;
-}
-
 static int parse_options(char *options, struct super_block *sb)
 {
 	struct nilfs_sb_info *sbi = NILFS_SB(sb);
@@ -612,13 +581,8 @@ static int parse_options(char *options, struct super_block *sb)
 
 		token = match_token(p, tokens, args);
 		switch (token) {
-		case Opt_barrier:
-			if (match_bool(&args[0], &option))
-				return 0;
-			if (option)
-				nilfs_set_opt(sbi, BARRIER);
-			else
-				nilfs_clear_opt(sbi, BARRIER);
+		case Opt_nobarrier:
+			nilfs_clear_opt(sbi, BARRIER);
 			break;
 		case Opt_order:
 			if (strcmp(args[0].from, "relaxed") == 0)
@@ -647,6 +611,9 @@ static int parse_options(char *options, struct super_block *sb)
 			sbi->s_snapshot_cno = option;
 			nilfs_set_opt(sbi, SNAPSHOT);
 			break;
+		case Opt_norecovery:
+			nilfs_set_opt(sbi, NORECOVERY);
+			break;
 		default:
 			printk(KERN_ERR
 			       "NILFS: Unrecognized mount option \"%s\"\n", p);
@@ -672,9 +639,7 @@ static int nilfs_setup_super(struct nilfs_sb_info *sbi)
 	int mnt_count = le16_to_cpu(sbp->s_mnt_count);
 
 	/* nilfs->sem must be locked by the caller. */
-	if (!(nilfs->ns_mount_state & NILFS_VALID_FS)) {
-		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
-	} else if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
+	if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
 		printk(KERN_WARNING
 		       "NILFS warning: mounting fs with errors\n");
 #if 0
@@ -782,11 +747,10 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
 	sb->s_root = NULL;
 	sb->s_time_gran = 1;
 
-	if (!nilfs_loaded(nilfs)) {
-		err = load_nilfs(nilfs, sbi);
-		if (err)
-			goto failed_sbi;
-	}
+	err = load_nilfs(nilfs, sbi);
+	if (err)
+		goto failed_sbi;
+
 	cno = nilfs_last_cno(nilfs);
 
 	if (sb->s_flags & MS_RDONLY) {
@@ -854,12 +818,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
 		up_write(&nilfs->ns_sem);
 	}
 
-	err = nilfs_mark_recovery_complete(sbi);
-	if (unlikely(err)) {
-		printk(KERN_ERR "NILFS: recovery failed.\n");
-		goto failed_root;
-	}
-
 	down_write(&nilfs->ns_super_sem);
 	if (!nilfs_test_opt(sbi, SNAPSHOT))
 		nilfs->ns_current = sbi;
@@ -867,10 +825,6 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
 
 	return 0;
 
- failed_root:
-	dput(sb->s_root);
-	sb->s_root = NULL;
-
  failed_segctor:
 	nilfs_detach_segment_constructor(sbi);
 
@@ -915,6 +869,14 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
 		goto restore_opts;
 	}
 
+	if (!nilfs_valid_fs(nilfs)) {
+		printk(KERN_WARNING "NILFS (device %s): couldn't "
+		       "remount because the filesystem is in an "
+		       "incomplete recovery state.\n", sb->s_id);
+		err = -EINVAL;
+		goto restore_opts;
+	}
+
 	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
 		goto out;
 	if (*flags & MS_RDONLY) {
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index ad391a8c3e7e..6241e1722efc 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -146,13 +146,9 @@ void put_nilfs(struct the_nilfs *nilfs)
 
 	might_sleep();
 	if (nilfs_loaded(nilfs)) {
-		nilfs_mdt_clear(nilfs->ns_sufile);
 		nilfs_mdt_destroy(nilfs->ns_sufile);
-		nilfs_mdt_clear(nilfs->ns_cpfile);
 		nilfs_mdt_destroy(nilfs->ns_cpfile);
-		nilfs_mdt_clear(nilfs->ns_dat);
 		nilfs_mdt_destroy(nilfs->ns_dat);
-		/* XXX: how and when to clear nilfs->ns_gc_dat? */
 		nilfs_mdt_destroy(nilfs->ns_gc_dat);
 	}
 	if (nilfs_init(nilfs)) {
@@ -166,7 +162,6 @@ void put_nilfs(struct the_nilfs *nilfs)
 static int nilfs_load_super_root(struct the_nilfs *nilfs,
 				 struct nilfs_sb_info *sbi, sector_t sr_block)
 {
-	static struct lock_class_key dat_lock_key;
 	struct buffer_head *bh_sr;
 	struct nilfs_super_root *raw_sr;
 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
@@ -187,51 +182,36 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs,
 	inode_size = nilfs->ns_inode_size;
 
 	err = -ENOMEM;
-	nilfs->ns_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+	nilfs->ns_dat = nilfs_dat_new(nilfs, dat_entry_size);
 	if (unlikely(!nilfs->ns_dat))
 		goto failed;
 
-	nilfs->ns_gc_dat = nilfs_mdt_new(nilfs, NULL, NILFS_DAT_INO);
+	nilfs->ns_gc_dat = nilfs_dat_new(nilfs, dat_entry_size);
 	if (unlikely(!nilfs->ns_gc_dat))
 		goto failed_dat;
 
-	nilfs->ns_cpfile = nilfs_mdt_new(nilfs, NULL, NILFS_CPFILE_INO);
+	nilfs->ns_cpfile = nilfs_cpfile_new(nilfs, checkpoint_size);
 	if (unlikely(!nilfs->ns_cpfile))
 		goto failed_gc_dat;
 
-	nilfs->ns_sufile = nilfs_mdt_new(nilfs, NULL, NILFS_SUFILE_INO);
+	nilfs->ns_sufile = nilfs_sufile_new(nilfs, segment_usage_size);
 	if (unlikely(!nilfs->ns_sufile))
 		goto failed_cpfile;
 
-	err = nilfs_palloc_init_blockgroup(nilfs->ns_dat, dat_entry_size);
-	if (unlikely(err))
-		goto failed_sufile;
-
-	err = nilfs_palloc_init_blockgroup(nilfs->ns_gc_dat, dat_entry_size);
-	if (unlikely(err))
-		goto failed_sufile;
-
-	lockdep_set_class(&NILFS_MDT(nilfs->ns_dat)->mi_sem, &dat_lock_key);
-	lockdep_set_class(&NILFS_MDT(nilfs->ns_gc_dat)->mi_sem, &dat_lock_key);
-
 	nilfs_mdt_set_shadow(nilfs->ns_dat, nilfs->ns_gc_dat);
-	nilfs_mdt_set_entry_size(nilfs->ns_cpfile, checkpoint_size,
-				 sizeof(struct nilfs_cpfile_header));
-	nilfs_mdt_set_entry_size(nilfs->ns_sufile, segment_usage_size,
-				 sizeof(struct nilfs_sufile_header));
 
-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_dat, bh_sr, NILFS_SR_DAT_OFFSET(inode_size));
+	err = nilfs_dat_read(nilfs->ns_dat, (void *)bh_sr->b_data +
+			     NILFS_SR_DAT_OFFSET(inode_size));
 	if (unlikely(err))
 		goto failed_sufile;
 
-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_cpfile, bh_sr, NILFS_SR_CPFILE_OFFSET(inode_size));
+	err = nilfs_cpfile_read(nilfs->ns_cpfile, (void *)bh_sr->b_data +
+				NILFS_SR_CPFILE_OFFSET(inode_size));
 	if (unlikely(err))
 		goto failed_sufile;
 
-	err = nilfs_mdt_read_inode_direct(
-		nilfs->ns_sufile, bh_sr, NILFS_SR_SUFILE_OFFSET(inode_size));
+	err = nilfs_sufile_read(nilfs->ns_sufile, (void *)bh_sr->b_data +
+				NILFS_SR_SUFILE_OFFSET(inode_size));
 	if (unlikely(err))
 		goto failed_sufile;
 
@@ -281,29 +261,30 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
 	struct nilfs_recovery_info ri;
 	unsigned int s_flags = sbi->s_super->s_flags;
 	int really_read_only = bdev_read_only(nilfs->ns_bdev);
-	unsigned valid_fs;
-	int err = 0;
-
-	nilfs_init_recovery_info(&ri);
+	int valid_fs = nilfs_valid_fs(nilfs);
+	int err;
 
-	down_write(&nilfs->ns_sem);
-	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
-	up_write(&nilfs->ns_sem);
+	if (nilfs_loaded(nilfs)) {
+		if (valid_fs ||
+		    ((s_flags & MS_RDONLY) && nilfs_test_opt(sbi, NORECOVERY)))
+			return 0;
+		printk(KERN_ERR "NILFS: the filesystem is in an incomplete "
+		       "recovery state.\n");
+		return -EINVAL;
+	}
 
-	if (!valid_fs && (s_flags & MS_RDONLY)) {
-		printk(KERN_INFO "NILFS: INFO: recovery "
-		       "required for readonly filesystem.\n");
-		if (really_read_only) {
-			printk(KERN_ERR "NILFS: write access "
-			       "unavailable, cannot proceed.\n");
-			err = -EROFS;
-			goto failed;
+	if (!valid_fs) {
+		printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
+		if (s_flags & MS_RDONLY) {
+			printk(KERN_INFO "NILFS: INFO: recovery "
+			       "required for readonly filesystem.\n");
+			printk(KERN_INFO "NILFS: write access will "
+			       "be enabled during recovery.\n");
 		}
-		printk(KERN_INFO "NILFS: write access will "
-		       "be enabled during recovery.\n");
-		sbi->s_super->s_flags &= ~MS_RDONLY;
 	}
 
+	nilfs_init_recovery_info(&ri);
+
 	err = nilfs_search_super_root(nilfs, sbi, &ri);
 	if (unlikely(err)) {
 		printk(KERN_ERR "NILFS: error searching super root.\n");
@@ -316,19 +297,56 @@ int load_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi)
 		goto failed;
 	}
 
-	if (!valid_fs) {
-		err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
-		if (unlikely(err)) {
-			nilfs_mdt_destroy(nilfs->ns_cpfile);
-			nilfs_mdt_destroy(nilfs->ns_sufile);
-			nilfs_mdt_destroy(nilfs->ns_dat);
-			goto failed;
+	if (valid_fs)
+		goto skip_recovery;
+
+	if (s_flags & MS_RDONLY) {
+		if (nilfs_test_opt(sbi, NORECOVERY)) {
+			printk(KERN_INFO "NILFS: norecovery option specified. "
+			       "skipping roll-forward recovery\n");
+			goto skip_recovery;
 		}
-		if (ri.ri_need_recovery == NILFS_RECOVERY_SR_UPDATED)
-			sbi->s_super->s_dirt = 1;
+		if (really_read_only) {
+			printk(KERN_ERR "NILFS: write access "
+			       "unavailable, cannot proceed.\n");
+			err = -EROFS;
+			goto failed_unload;
+		}
+		sbi->s_super->s_flags &= ~MS_RDONLY;
+	} else if (nilfs_test_opt(sbi, NORECOVERY)) {
+		printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
+		       "option was specified for a read/write mount\n");
+		err = -EINVAL;
+		goto failed_unload;
 	}
 
+	err = nilfs_recover_logical_segments(nilfs, sbi, &ri);
+	if (err)
+		goto failed_unload;
+
+	down_write(&nilfs->ns_sem);
+	nilfs->ns_mount_state |= NILFS_VALID_FS;
+	nilfs->ns_sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
+	err = nilfs_commit_super(sbi, 1);
+	up_write(&nilfs->ns_sem);
+
+	if (err) {
+		printk(KERN_ERR "NILFS: failed to update super block. "
+		       "recovery unfinished.\n");
+		goto failed_unload;
+	}
+	printk(KERN_INFO "NILFS: recovery complete.\n");
+
+ skip_recovery:
 	set_nilfs_loaded(nilfs);
+	nilfs_clear_recovery_info(&ri);
+	sbi->s_super->s_flags = s_flags;
+	return 0;
+
+ failed_unload:
+	nilfs_mdt_destroy(nilfs->ns_cpfile);
+	nilfs_mdt_destroy(nilfs->ns_sufile);
+	nilfs_mdt_destroy(nilfs->ns_dat);
 
  failed:
 	nilfs_clear_recovery_info(&ri);
@@ -632,30 +650,23 @@ int nilfs_count_free_blocks(struct the_nilfs *nilfs, sector_t *nblocks)
 {
 	struct inode *dat = nilfs_dat_inode(nilfs);
 	unsigned long ncleansegs;
-	int err;
 
 	down_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
-	err = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile, &ncleansegs);
+	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
 	up_read(&NILFS_MDT(dat)->mi_sem);	/* XXX */
-	if (likely(!err))
-		*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
-	return err;
+	*nblocks = (sector_t)ncleansegs * nilfs->ns_blocks_per_segment;
+	return 0;
 }
 
 int nilfs_near_disk_full(struct the_nilfs *nilfs)
 {
-	struct inode *sufile = nilfs->ns_sufile;
 	unsigned long ncleansegs, nincsegs;
-	int ret;
 
-	ret = nilfs_sufile_get_ncleansegs(sufile, &ncleansegs);
-	if (likely(!ret)) {
-		nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
-			nilfs->ns_blocks_per_segment + 1;
-		if (ncleansegs <= nilfs->ns_nrsvsegs + nincsegs)
-			ret++;
-	}
-	return ret;
+	ncleansegs = nilfs_sufile_get_ncleansegs(nilfs->ns_sufile);
+	nincsegs = atomic_read(&nilfs->ns_ndirtyblks) /
+		   nilfs->ns_blocks_per_segment + 1;
+
+	return ncleansegs <= nilfs->ns_nrsvsegs + nincsegs;
 }
 
 /**
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 20abd55881e0..589786e33464 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -258,6 +258,16 @@ static inline void nilfs_put_sbinfo(struct nilfs_sb_info *sbi)
 	kfree(sbi);
 }
 
+static inline int nilfs_valid_fs(struct the_nilfs *nilfs)
+{
+	unsigned valid_fs;
+
+	down_read(&nilfs->ns_sem);
+	valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS);
+	up_read(&nilfs->ns_sem);
+	return valid_fs;
+}
+
 static inline void
 nilfs_get_segment_range(struct the_nilfs *nilfs, __u64 segnum,
 			sector_t *seg_start, sector_t *seg_end)
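
Note on the the_nilfs.h hunk above: nilfs_valid_fs() gives callers a locked read of the NILFS_VALID_FS mount-state bit; load_nilfs() and nilfs_remount() in this series use it to refuse work on a filesystem left in an incomplete recovery state. A small sketch of the pattern, assuming only this header (require_valid_fs() is illustrative, not part of the patch):

	/* Illustrative only: bail out when recovery did not complete. */
	static int require_valid_fs(struct the_nilfs *nilfs)
	{
		return nilfs_valid_fs(nilfs) ? 0 : -EINVAL;
	}
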
diff --git a/include/acpi/acpi_hest.h b/include/acpi/acpi_hest.h
new file mode 100644
index 000000000000..63194d03cb2d
--- /dev/null
+++ b/include/acpi/acpi_hest.h
@@ -0,0 +1,12 @@
+#ifndef __ACPI_HEST_H
+#define __ACPI_HEST_H
+
+#include <linux/pci.h>
+
+#ifdef CONFIG_ACPI
+extern int acpi_hest_firmware_first_pci(struct pci_dev *pci);
+#else
+static inline int acpi_hest_firmware_first_pci(struct pci_dev *pci) { return 0; }
+#endif
+
+#endif
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index b940fdfa3b25..cfa6af43c9ea 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -8,3 +8,4 @@ unifdef-y += radeon_drm.h
8unifdef-y += sis_drm.h 8unifdef-y += sis_drm.h
9unifdef-y += savage_drm.h 9unifdef-y += savage_drm.h
10unifdef-y += via_drm.h 10unifdef-y += via_drm.h
11unifdef-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index 7cb50bdde46d..e3f46e0cb7dc 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -36,17 +36,27 @@
36#ifndef _DRM_H_ 36#ifndef _DRM_H_
37#define _DRM_H_ 37#define _DRM_H_
38 38
39#if defined(__linux__)
40
39#include <linux/types.h> 41#include <linux/types.h>
40#include <asm/ioctl.h> /* For _IO* macros */ 42#include <asm/ioctl.h>
41#define DRM_IOCTL_NR(n) _IOC_NR(n) 43typedef unsigned int drm_handle_t;
42#define DRM_IOC_VOID _IOC_NONE
43#define DRM_IOC_READ _IOC_READ
44#define DRM_IOC_WRITE _IOC_WRITE
45#define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE
46#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
47 44
48#define DRM_MAJOR 226 45#else /* One of the BSDs */
49#define DRM_MAX_MINOR 15 46
47#include <sys/ioccom.h>
48#include <sys/types.h>
49typedef int8_t __s8;
50typedef uint8_t __u8;
51typedef int16_t __s16;
52typedef uint16_t __u16;
53typedef int32_t __s32;
54typedef uint32_t __u32;
55typedef int64_t __s64;
56typedef uint64_t __u64;
57typedef unsigned long drm_handle_t;
58
59#endif
50 60
51#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ 61#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
52#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */ 62#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
@@ -59,7 +69,6 @@
59#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT) 69#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
60#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT)) 70#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
61 71
62typedef unsigned int drm_handle_t;
63typedef unsigned int drm_context_t; 72typedef unsigned int drm_context_t;
64typedef unsigned int drm_drawable_t; 73typedef unsigned int drm_drawable_t;
65typedef unsigned int drm_magic_t; 74typedef unsigned int drm_magic_t;
@@ -454,6 +463,7 @@ struct drm_irq_busid {
454enum drm_vblank_seq_type { 463enum drm_vblank_seq_type {
455 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 464 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
456 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 465 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
466 _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
457 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ 467 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
458 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 468 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
459 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 469 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
@@ -461,8 +471,8 @@ enum drm_vblank_seq_type {
461}; 471};
462 472
463#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) 473#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
464#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \ 474#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
465 _DRM_VBLANK_NEXTONMISS) 475 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
466 476
467struct drm_wait_vblank_request { 477struct drm_wait_vblank_request {
468 enum drm_vblank_seq_type type; 478 enum drm_vblank_seq_type type;
@@ -686,6 +696,8 @@ struct drm_gem_open {
686#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd) 696#define DRM_IOCTL_MODE_GETFB DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
687#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd) 697#define DRM_IOCTL_MODE_ADDFB DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
688#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int) 698#define DRM_IOCTL_MODE_RMFB DRM_IOWR(0xAF, unsigned int)
699#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
700#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
689 701
690/** 702/**
691 * Device specific ioctls should only be in their respective headers 703 * Device specific ioctls should only be in their respective headers
@@ -698,6 +710,35 @@ struct drm_gem_open {
698#define DRM_COMMAND_BASE 0x40 710#define DRM_COMMAND_BASE 0x40
699#define DRM_COMMAND_END 0xA0 711#define DRM_COMMAND_END 0xA0
700 712
713/**
714 * Header for events written back to userspace on the drm fd. The
715 * type defines the type of event, the length specifies the total
716 * length of the event (including the header), and user_data is
717 * typically a 64 bit value passed with the ioctl that triggered the
 718 * event. A read on the drm fd will only ever return complete
 719 * events; if, for example, the read buffer is 100 bytes and
 720 * there are two 64 byte events pending, only one will be returned.
721 *
722 * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
723 * up are chipset specific.
724 */
725struct drm_event {
726 __u32 type;
727 __u32 length;
728};
729
730#define DRM_EVENT_VBLANK 0x01
731#define DRM_EVENT_FLIP_COMPLETE 0x02
732
733struct drm_event_vblank {
734 struct drm_event base;
735 __u64 user_data;
736 __u32 tv_sec;
737 __u32 tv_usec;
738 __u32 sequence;
739 __u32 reserved;
740};
741
701/* typedef area */ 742/* typedef area */
702#ifndef __KERNEL__ 743#ifndef __KERNEL__
703typedef struct drm_clip_rect drm_clip_rect_t; 744typedef struct drm_clip_rect drm_clip_rect_t;
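To make the event-stream semantics described above concrete, here is a
hedged userspace sketch that drains complete events from a drm fd; the fd
setup is assumed, and the event header is re-declared locally so the
fragment stands alone:

    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    struct drm_event { uint32_t type; uint32_t length; };

    /* Sketch: consume whatever complete events the kernel has queued. */
    static void drain_drm_events(int drm_fd)
    {
            char buf[1024];
            ssize_t len = read(drm_fd, buf, sizeof(buf)); /* whole events only */
            ssize_t pos = 0;

            while (pos + (ssize_t)sizeof(struct drm_event) <= len) {
                    struct drm_event ev;

                    memcpy(&ev, buf + pos, sizeof(ev));
                    if (ev.length == 0)
                            break;          /* defensive: malformed stream */
                    if (ev.type == 0x01)    /* DRM_EVENT_VBLANK */
                            ;  /* a struct drm_event_vblank starts at buf + pos */
                    pos += ev.length;       /* length includes the header */
            }
    }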
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c8e64bbadbcf..19ef8ebdc662 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -245,16 +245,6 @@ extern void drm_ut_debug_printk(unsigned int request_level,
245 245
246#endif 246#endif
247 247
248#define DRM_PROC_LIMIT (PAGE_SIZE-80)
249
250#define DRM_PROC_PRINT(fmt, arg...) \
251 len += sprintf(&buf[len], fmt , ##arg); \
252 if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; }
253
254#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \
255 len += sprintf(&buf[len], fmt , ##arg); \
256 if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; }
257
258/*@}*/ 248/*@}*/
259 249
260/***********************************************************************/ 250/***********************************************************************/
@@ -265,19 +255,8 @@ extern void drm_ut_debug_printk(unsigned int request_level,
265 255
266#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) 256#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
267#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) 257#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
268#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist)
269 258
270#define DRM_IF_VERSION(maj, min) (maj << 16 | min) 259#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
271/**
272 * Get the private SAREA mapping.
273 *
274 * \param _dev DRM device.
275 * \param _ctx context number.
276 * \param _map output mapping.
277 */
278#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \
279 (_map) = (_dev)->context_sareas[_ctx]; \
280} while(0)
281 260
282/** 261/**
283 * Test that the hardware lock is held by the caller, returning otherwise. 262 * Test that the hardware lock is held by the caller, returning otherwise.
@@ -297,18 +276,6 @@ do { \
297} while (0) 276} while (0)
298 277
299/** 278/**
300 * Copy and IOCTL return string to user space
301 */
302#define DRM_COPY( name, value ) \
303 len = strlen( value ); \
304 if ( len > name##_len ) len = name##_len; \
305 name##_len = strlen( value ); \
306 if ( len && name ) { \
307 if ( copy_to_user( name, value, len ) ) \
308 return -EFAULT; \
309 }
310
311/**
312 * Ioctl function type. 279 * Ioctl function type.
313 * 280 *
314 * \param inode device inode. 281 * \param inode device inode.
@@ -322,6 +289,9 @@ typedef int drm_ioctl_t(struct drm_device *dev, void *data,
322typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, 289typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
323 unsigned long arg); 290 unsigned long arg);
324 291
292#define DRM_IOCTL_NR(n) _IOC_NR(n)
293#define DRM_MAJOR 226
294
325#define DRM_AUTH 0x1 295#define DRM_AUTH 0x1
326#define DRM_MASTER 0x2 296#define DRM_MASTER 0x2
327#define DRM_ROOT_ONLY 0x4 297#define DRM_ROOT_ONLY 0x4
@@ -426,6 +396,14 @@ struct drm_buf_entry {
426 struct drm_freelist freelist; 396 struct drm_freelist freelist;
427}; 397};
428 398
399/* Event queued up for userspace to read */
400struct drm_pending_event {
401 struct drm_event *event;
402 struct list_head link;
403 struct drm_file *file_priv;
404 void (*destroy)(struct drm_pending_event *event);
405};
406
429/** File private data */ 407/** File private data */
430struct drm_file { 408struct drm_file {
431 int authenticated; 409 int authenticated;
@@ -449,6 +427,10 @@ struct drm_file {
449 struct drm_master *master; /* master this node is currently associated with 427 struct drm_master *master; /* master this node is currently associated with
450 N.B. not always minor->master */ 428 N.B. not always minor->master */
451 struct list_head fbs; 429 struct list_head fbs;
430
431 wait_queue_head_t event_wait;
432 struct list_head event_list;
433 int event_space;
452}; 434};
453 435
454/** Wait queue */ 436/** Wait queue */
@@ -795,6 +777,15 @@ struct drm_driver {
795 /* Master routines */ 777 /* Master routines */
796 int (*master_create)(struct drm_device *dev, struct drm_master *master); 778 int (*master_create)(struct drm_device *dev, struct drm_master *master);
797 void (*master_destroy)(struct drm_device *dev, struct drm_master *master); 779 void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
780 /**
781 * master_set is called whenever the minor master is set.
782 * master_drop is called whenever the minor master is dropped.
783 */
784
785 int (*master_set)(struct drm_device *dev, struct drm_file *file_priv,
786 bool from_open);
787 void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv,
788 bool from_release);
798 789
799 int (*proc_init)(struct drm_minor *minor); 790 int (*proc_init)(struct drm_minor *minor);
800 void (*proc_cleanup)(struct drm_minor *minor); 791 void (*proc_cleanup)(struct drm_minor *minor);
@@ -900,6 +891,12 @@ struct drm_minor {
900 struct drm_mode_group mode_group; 891 struct drm_mode_group mode_group;
901}; 892};
902 893
894struct drm_pending_vblank_event {
895 struct drm_pending_event base;
896 int pipe;
897 struct drm_event_vblank event;
898};
899
903/** 900/**
904 * DRM device structure. This structure represent a complete card that 901 * DRM device structure. This structure represent a complete card that
905 * may contain multiple heads. 902 * may contain multiple heads.
@@ -999,6 +996,12 @@ struct drm_device {
999 996
1000 u32 max_vblank_count; /**< size of vblank counter register */ 997 u32 max_vblank_count; /**< size of vblank counter register */
1001 998
999 /**
1000 * List of events
1001 */
1002 struct list_head vblank_event_list;
1003 spinlock_t event_lock;
1004
1002 /*@} */ 1005 /*@} */
1003 cycles_t ctx_start; 1006 cycles_t ctx_start;
1004 cycles_t lck_start; 1007 cycles_t lck_start;
@@ -1135,6 +1138,8 @@ extern int drm_lastclose(struct drm_device *dev);
1135extern int drm_open(struct inode *inode, struct file *filp); 1138extern int drm_open(struct inode *inode, struct file *filp);
1136extern int drm_stub_open(struct inode *inode, struct file *filp); 1139extern int drm_stub_open(struct inode *inode, struct file *filp);
1137extern int drm_fasync(int fd, struct file *filp, int on); 1140extern int drm_fasync(int fd, struct file *filp, int on);
1141extern ssize_t drm_read(struct file *filp, char __user *buffer,
1142 size_t count, loff_t *offset);
1138extern int drm_release(struct inode *inode, struct file *filp); 1143extern int drm_release(struct inode *inode, struct file *filp);
1139 1144
1140 /* Mapping support (drm_vm.h) */ 1145 /* Mapping support (drm_vm.h) */
@@ -1295,6 +1300,7 @@ extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
1295extern void drm_handle_vblank(struct drm_device *dev, int crtc); 1300extern void drm_handle_vblank(struct drm_device *dev, int crtc);
1296extern int drm_vblank_get(struct drm_device *dev, int crtc); 1301extern int drm_vblank_get(struct drm_device *dev, int crtc);
1297extern void drm_vblank_put(struct drm_device *dev, int crtc); 1302extern void drm_vblank_put(struct drm_device *dev, int crtc);
1303extern void drm_vblank_off(struct drm_device *dev, int crtc);
1298extern void drm_vblank_cleanup(struct drm_device *dev); 1304extern void drm_vblank_cleanup(struct drm_device *dev);
1299/* Modesetting support */ 1305/* Modesetting support */
1300extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); 1306extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
@@ -1519,14 +1525,27 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
1519 1525
1520static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) 1526static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
1521{ 1527{
1528 if (size != 0 && nmemb > ULONG_MAX / size)
1529 return NULL;
1530
1522 if (size * nmemb <= PAGE_SIZE) 1531 if (size * nmemb <= PAGE_SIZE)
1523 return kcalloc(nmemb, size, GFP_KERNEL); 1532 return kcalloc(nmemb, size, GFP_KERNEL);
1524 1533
1534 return __vmalloc(size * nmemb,
1535 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
1536}
1537
1538/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
1539static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
1540{
1525 if (size != 0 && nmemb > ULONG_MAX / size) 1541 if (size != 0 && nmemb > ULONG_MAX / size)
1526 return NULL; 1542 return NULL;
1527 1543
1544 if (size * nmemb <= PAGE_SIZE)
1545 return kmalloc(nmemb * size, GFP_KERNEL);
1546
1528 return __vmalloc(size * nmemb, 1547 return __vmalloc(size * nmemb,
1529 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL); 1548 GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
1530} 1549}
1531 1550
1532static __inline void drm_free_large(void *ptr) 1551static __inline void drm_free_large(void *ptr)
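The overflow guard added above follows a standard pattern: refuse the
allocation when nmemb * size would wrap. A standalone userspace sketch of
the same check, under a hypothetical name:

    #include <stdint.h>
    #include <stdlib.h>

    /* Allocate nmemb * size bytes, refusing on multiplication overflow. */
    static void *malloc_ab(size_t nmemb, size_t size)
    {
            if (size != 0 && nmemb > SIZE_MAX / size)
                    return NULL;    /* nmemb * size would overflow */
            return malloc(nmemb * size);
    }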
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index b69347b8904f..fdf43abc36db 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -123,7 +123,7 @@ struct drm_display_mode {
123 int type; 123 int type;
124 124
125 /* Proposed mode values */ 125 /* Proposed mode values */
126 int clock; 126 int clock; /* in kHz */
127 int hdisplay; 127 int hdisplay;
128 int hsync_start; 128 int hsync_start;
129 int hsync_end; 129 int hsync_end;
@@ -164,8 +164,8 @@ struct drm_display_mode {
164 int *private; 164 int *private;
165 int private_flags; 165 int private_flags;
166 166
167 int vrefresh; 167 int vrefresh; /* in Hz */
168 float hsync; 168 int hsync; /* in kHz */
169}; 169};
170 170
171enum drm_connector_status { 171enum drm_connector_status {
@@ -242,6 +242,21 @@ struct drm_framebuffer_funcs {
242 int (*create_handle)(struct drm_framebuffer *fb, 242 int (*create_handle)(struct drm_framebuffer *fb,
243 struct drm_file *file_priv, 243 struct drm_file *file_priv,
244 unsigned int *handle); 244 unsigned int *handle);
245 /**
 246 * Optional callback for the dirty fb ioctl.
247 *
248 * Userspace can notify the driver via this callback
 249 * that an area of the framebuffer has changed and should
250 * be flushed to the display hardware.
251 *
 252 * See the documentation of struct drm_mode_fb_dirty_cmd
 253 * in drm_mode.h for more information; the semantics
 254 * and arguments map one-to-one onto
 255 * this function.
256 */
257 int (*dirty)(struct drm_framebuffer *framebuffer, unsigned flags,
258 unsigned color, struct drm_clip_rect *clips,
259 unsigned num_clips);
245}; 260};
246 261
247struct drm_framebuffer { 262struct drm_framebuffer {
@@ -256,7 +271,7 @@ struct drm_framebuffer {
256 unsigned int depth; 271 unsigned int depth;
257 int bits_per_pixel; 272 int bits_per_pixel;
258 int flags; 273 int flags;
259 void *fbdev; 274 struct fb_info *fbdev;
260 u32 pseudo_palette[17]; 275 u32 pseudo_palette[17];
261 struct list_head filp_head; 276 struct list_head filp_head;
262 /* if you are using the helper */ 277 /* if you are using the helper */
@@ -290,6 +305,7 @@ struct drm_property {
290struct drm_crtc; 305struct drm_crtc;
291struct drm_connector; 306struct drm_connector;
292struct drm_encoder; 307struct drm_encoder;
308struct drm_pending_vblank_event;
293 309
294/** 310/**
295 * drm_crtc_funcs - control CRTCs for a given device 311 * drm_crtc_funcs - control CRTCs for a given device
@@ -333,6 +349,19 @@ struct drm_crtc_funcs {
333 void (*destroy)(struct drm_crtc *crtc); 349 void (*destroy)(struct drm_crtc *crtc);
334 350
335 int (*set_config)(struct drm_mode_set *set); 351 int (*set_config)(struct drm_mode_set *set);
352
353 /*
354 * Flip to the given framebuffer. This implements the page
 355 * flip ioctl described in drm_mode.h; specifically, the
356 * implementation must return immediately and block all
357 * rendering to the current fb until the flip has completed.
358 * If userspace set the event flag in the ioctl, the event
359 * argument will point to an event to send back when the flip
360 * completes, otherwise it will be NULL.
361 */
362 int (*page_flip)(struct drm_crtc *crtc,
363 struct drm_framebuffer *fb,
364 struct drm_pending_vblank_event *event);
336}; 365};
337 366
338/** 367/**
@@ -596,6 +625,7 @@ struct drm_mode_config {
596 /* Optional properties */ 625 /* Optional properties */
597 struct drm_property *scaling_mode_property; 626 struct drm_property *scaling_mode_property;
598 struct drm_property *dithering_mode_property; 627 struct drm_property *dithering_mode_property;
628 struct drm_property *dirty_info_property;
599}; 629};
600 630
601#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) 631#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -667,6 +697,7 @@ extern void drm_mode_validate_size(struct drm_device *dev,
667extern void drm_mode_prune_invalid(struct drm_device *dev, 697extern void drm_mode_prune_invalid(struct drm_device *dev,
668 struct list_head *mode_list, bool verbose); 698 struct list_head *mode_list, bool verbose);
669extern void drm_mode_sort(struct list_head *mode_list); 699extern void drm_mode_sort(struct list_head *mode_list);
700extern int drm_mode_hsync(struct drm_display_mode *mode);
670extern int drm_mode_vrefresh(struct drm_display_mode *mode); 701extern int drm_mode_vrefresh(struct drm_display_mode *mode);
671extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, 702extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
672 int adjust_flags); 703 int adjust_flags);
@@ -703,6 +734,7 @@ extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats
703 char *formats[]); 734 char *formats[]);
704extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 735extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
705extern int drm_mode_create_dithering_property(struct drm_device *dev); 736extern int drm_mode_create_dithering_property(struct drm_device *dev);
737extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
706extern char *drm_get_encoder_name(struct drm_encoder *encoder); 738extern char *drm_get_encoder_name(struct drm_encoder *encoder);
707 739
708extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 740extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
@@ -711,7 +743,8 @@ extern void drm_mode_connector_detach_encoder(struct drm_connector *connector,
711 struct drm_encoder *encoder); 743 struct drm_encoder *encoder);
712extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 744extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
713 int gamma_size); 745 int gamma_size);
714extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); 746extern struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
747 uint32_t id, uint32_t type);
715/* IOCTLs */ 748/* IOCTLs */
716extern int drm_mode_getresources(struct drm_device *dev, 749extern int drm_mode_getresources(struct drm_device *dev,
717 void *data, struct drm_file *file_priv); 750 void *data, struct drm_file *file_priv);
@@ -730,6 +763,8 @@ extern int drm_mode_rmfb(struct drm_device *dev,
730 void *data, struct drm_file *file_priv); 763 void *data, struct drm_file *file_priv);
731extern int drm_mode_getfb(struct drm_device *dev, 764extern int drm_mode_getfb(struct drm_device *dev,
732 void *data, struct drm_file *file_priv); 765 void *data, struct drm_file *file_priv);
766extern int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
767 void *data, struct drm_file *file_priv);
733extern int drm_mode_addmode_ioctl(struct drm_device *dev, 768extern int drm_mode_addmode_ioctl(struct drm_device *dev,
734 void *data, struct drm_file *file_priv); 769 void *data, struct drm_file *file_priv);
735extern int drm_mode_rmmode_ioctl(struct drm_device *dev, 770extern int drm_mode_rmmode_ioctl(struct drm_device *dev,
@@ -756,6 +791,8 @@ extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
756extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, 791extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
757 void *data, struct drm_file *file_priv); 792 void *data, struct drm_file *file_priv);
758extern bool drm_detect_hdmi_monitor(struct edid *edid); 793extern bool drm_detect_hdmi_monitor(struct edid *edid);
794extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
795 void *data, struct drm_file *file_priv);
759extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, 796extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,
760 int hdisplay, int vdisplay, int vrefresh, 797 int hdisplay, int vdisplay, int vrefresh,
761 bool reduced, bool interlaced, bool margins); 798 bool reduced, bool interlaced, bool margins);
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/include/drm/drm_dp_helper.h
index 2b38054d3b6d..a49e791db0b0 100644
--- a/drivers/gpu/drm/i915/intel_dp.h
+++ b/include/drm/drm_dp_helper.h
@@ -20,8 +20,8 @@
20 * OF THIS SOFTWARE. 20 * OF THIS SOFTWARE.
21 */ 21 */
22 22
23#ifndef _INTEL_DP_H_ 23#ifndef _DRM_DP_HELPER_H_
24#define _INTEL_DP_H_ 24#define _DRM_DP_HELPER_H_
25 25
26/* From the VESA DisplayPort spec */ 26/* From the VESA DisplayPort spec */
27 27
@@ -43,16 +43,41 @@
43#define AUX_I2C_REPLY_MASK (0x3 << 6) 43#define AUX_I2C_REPLY_MASK (0x3 << 6)
44 44
45/* AUX CH addresses */ 45/* AUX CH addresses */
46#define DP_LINK_BW_SET 0x100 46/* DPCD */
47#define DP_DPCD_REV 0x000
48
49#define DP_MAX_LINK_RATE 0x001
50
51#define DP_MAX_LANE_COUNT 0x002
52# define DP_MAX_LANE_COUNT_MASK 0x1f
53# define DP_ENHANCED_FRAME_CAP (1 << 7)
54
55#define DP_MAX_DOWNSPREAD 0x003
56# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
57
58#define DP_NORP 0x004
59
60#define DP_DOWNSTREAMPORT_PRESENT 0x005
61# define DP_DWN_STRM_PORT_PRESENT (1 << 0)
62# define DP_DWN_STRM_PORT_TYPE_MASK 0x06
63/* 00b = DisplayPort */
64/* 01b = Analog */
65/* 10b = TMDS or HDMI */
66/* 11b = Other */
67# define DP_FORMAT_CONVERSION (1 << 3)
68
69#define DP_MAIN_LINK_CHANNEL_CODING 0x006
70
71/* link configuration */
72#define DP_LINK_BW_SET 0x100
47# define DP_LINK_BW_1_62 0x06 73# define DP_LINK_BW_1_62 0x06
48# define DP_LINK_BW_2_7 0x0a 74# define DP_LINK_BW_2_7 0x0a
49 75
50#define DP_LANE_COUNT_SET 0x101 76#define DP_LANE_COUNT_SET 0x101
51# define DP_LANE_COUNT_MASK 0x0f 77# define DP_LANE_COUNT_MASK 0x0f
52# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) 78# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7)
53 79
54#define DP_TRAINING_PATTERN_SET 0x102 80#define DP_TRAINING_PATTERN_SET 0x102
55
56# define DP_TRAINING_PATTERN_DISABLE 0 81# define DP_TRAINING_PATTERN_DISABLE 0
57# define DP_TRAINING_PATTERN_1 1 82# define DP_TRAINING_PATTERN_1 1
58# define DP_TRAINING_PATTERN_2 2 83# define DP_TRAINING_PATTERN_2 2
@@ -102,11 +127,14 @@
102 127
103#define DP_LANE0_1_STATUS 0x202 128#define DP_LANE0_1_STATUS 0x202
104#define DP_LANE2_3_STATUS 0x203 129#define DP_LANE2_3_STATUS 0x203
105
106# define DP_LANE_CR_DONE (1 << 0) 130# define DP_LANE_CR_DONE (1 << 0)
107# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) 131# define DP_LANE_CHANNEL_EQ_DONE (1 << 1)
108# define DP_LANE_SYMBOL_LOCKED (1 << 2) 132# define DP_LANE_SYMBOL_LOCKED (1 << 2)
109 133
134#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \
135 DP_LANE_CHANNEL_EQ_DONE | \
136 DP_LANE_SYMBOL_LOCKED)
137
110#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 138#define DP_LANE_ALIGN_STATUS_UPDATED 0x204
111 139
112#define DP_INTERLANE_ALIGN_DONE (1 << 0) 140#define DP_INTERLANE_ALIGN_DONE (1 << 0)
@@ -120,25 +148,33 @@
120 148
121#define DP_ADJUST_REQUEST_LANE0_1 0x206 149#define DP_ADJUST_REQUEST_LANE0_1 0x206
122#define DP_ADJUST_REQUEST_LANE2_3 0x207 150#define DP_ADJUST_REQUEST_LANE2_3 0x207
123 151# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03
124#define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 152# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0
125#define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 153# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c
126#define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c 154# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2
127#define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 155# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30
128#define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 156# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4
129#define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 157# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
130#define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 158# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
131#define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 159
160#define DP_SET_POWER 0x600
161# define DP_SET_POWER_D0 0x1
162# define DP_SET_POWER_D3 0x2
163
164#define MODE_I2C_START 1
165#define MODE_I2C_WRITE 2
166#define MODE_I2C_READ 4
167#define MODE_I2C_STOP 8
132 168
133struct i2c_algo_dp_aux_data { 169struct i2c_algo_dp_aux_data {
134 bool running; 170 bool running;
135 u16 address; 171 u16 address;
136 int (*aux_ch) (struct i2c_adapter *adapter, 172 int (*aux_ch) (struct i2c_adapter *adapter,
137 uint8_t *send, int send_bytes, 173 int mode, uint8_t write_byte,
138 uint8_t *recv, int recv_bytes); 174 uint8_t *read_byte);
139}; 175};
140 176
141int 177int
142i2c_dp_aux_add_bus(struct i2c_adapter *adapter); 178i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
143 179
144#endif /* _INTEL_DP_H_ */ 180#endif /* _DRM_DP_HELPER_H_ */
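The new byte-at-a-time aux_ch signature is driven by the MODE_I2C_* flags
above. A minimal driver-side sketch; the hw_aux_read/hw_aux_write helpers
are hypothetical stand-ins for real register access:

    /* Hypothetical hardware helpers, assumed for this sketch. */
    extern int hw_aux_write(uint16_t addr, uint8_t byte);
    extern int hw_aux_read(uint16_t addr, uint8_t *byte);

    /* Sketch: one byte-at-a-time AUX channel transaction step. */
    static int example_aux_ch(struct i2c_adapter *adapter, int mode,
                              uint8_t write_byte, uint8_t *read_byte)
    {
            struct i2c_algo_dp_aux_data *algo = adapter->algo_data;

            if (mode & MODE_I2C_READ)
                    return hw_aux_read(algo->address, read_byte);
            if (mode & MODE_I2C_WRITE)
                    return hw_aux_write(algo->address, write_byte);
            return 0;   /* MODE_I2C_START/STOP: nothing to do here */
    }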
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 7d6c9a2dfcbb..d33c3e038606 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -106,6 +106,10 @@ struct detailed_data_color_point {
106 u8 wpindex2[3]; 106 u8 wpindex2[3];
107} __attribute__((packed)); 107} __attribute__((packed));
108 108
109struct cvt_timing {
110 u8 code[3];
111} __attribute__((packed));
112
109struct detailed_non_pixel { 113struct detailed_non_pixel {
110 u8 pad1; 114 u8 pad1;
111 u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name 115 u8 type; /* ff=serial, fe=string, fd=monitor range, fc=monitor name
@@ -117,9 +121,13 @@ struct detailed_non_pixel {
117 struct detailed_data_monitor_range range; 121 struct detailed_data_monitor_range range;
118 struct detailed_data_wpindex color; 122 struct detailed_data_wpindex color;
119 struct std_timing timings[5]; 123 struct std_timing timings[5];
124 struct cvt_timing cvt[4];
120 } data; 125 } data;
121} __attribute__((packed)); 126} __attribute__((packed));
122 127
128#define EDID_DETAIL_EST_TIMINGS 0xf7
129#define EDID_DETAIL_CVT_3BYTE 0xf8
130#define EDID_DETAIL_COLOR_MGMT_DATA 0xf9
123#define EDID_DETAIL_STD_MODES 0xfa 131#define EDID_DETAIL_STD_MODES 0xfa
124#define EDID_DETAIL_MONITOR_CPDATA 0xfb 132#define EDID_DETAIL_MONITOR_CPDATA 0xfb
125#define EDID_DETAIL_MONITOR_NAME 0xfc 133#define EDID_DETAIL_MONITOR_NAME 0xfc
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 62329f9a42cb..4c10be39a43b 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -66,6 +66,13 @@ extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
66 unsigned long size, 66 unsigned long size,
67 unsigned alignment, 67 unsigned alignment,
68 int atomic); 68 int atomic);
69extern struct drm_mm_node *drm_mm_get_block_range_generic(
70 struct drm_mm_node *node,
71 unsigned long size,
72 unsigned alignment,
73 unsigned long start,
74 unsigned long end,
75 int atomic);
69static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent, 76static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
70 unsigned long size, 77 unsigned long size,
71 unsigned alignment) 78 unsigned alignment)
@@ -78,11 +85,38 @@ static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *pa
78{ 85{
79 return drm_mm_get_block_generic(parent, size, alignment, 1); 86 return drm_mm_get_block_generic(parent, size, alignment, 1);
80} 87}
88static inline struct drm_mm_node *drm_mm_get_block_range(
89 struct drm_mm_node *parent,
90 unsigned long size,
91 unsigned alignment,
92 unsigned long start,
93 unsigned long end)
94{
95 return drm_mm_get_block_range_generic(parent, size, alignment,
96 start, end, 0);
97}
98static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
99 struct drm_mm_node *parent,
100 unsigned long size,
101 unsigned alignment,
102 unsigned long start,
103 unsigned long end)
104{
105 return drm_mm_get_block_range_generic(parent, size, alignment,
106 start, end, 1);
107}
81extern void drm_mm_put_block(struct drm_mm_node *cur); 108extern void drm_mm_put_block(struct drm_mm_node *cur);
82extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, 109extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
83 unsigned long size, 110 unsigned long size,
84 unsigned alignment, 111 unsigned alignment,
85 int best_match); 112 int best_match);
113extern struct drm_mm_node *drm_mm_search_free_in_range(
114 const struct drm_mm *mm,
115 unsigned long size,
116 unsigned alignment,
117 unsigned long start,
118 unsigned long end,
119 int best_match);
86extern int drm_mm_init(struct drm_mm *mm, unsigned long start, 120extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
87 unsigned long size); 121 unsigned long size);
88extern void drm_mm_takedown(struct drm_mm *mm); 122extern void drm_mm_takedown(struct drm_mm *mm);
@@ -99,6 +133,7 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
99 return block->mm; 133 return block->mm;
100} 134}
101 135
136extern void drm_mm_debug_table(struct drm_mm *mm, const char *prefix);
102#ifdef CONFIG_DEBUG_FS 137#ifdef CONFIG_DEBUG_FS
103int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); 138int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm);
104#endif 139#endif
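A hedged fragment showing how the new range-restricted entry points
compose; mm, size, alignment, start and end are assumed to be set up
elsewhere:

    /* Sketch: allocate size bytes from within [start, end) of a manager. */
    struct drm_mm_node *node;

    node = drm_mm_search_free_in_range(mm, size, alignment, start, end, 0);
    if (node)
            node = drm_mm_get_block_range(node, size, alignment, start, end);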
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 1f908416aedb..43009bc2e757 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -27,9 +27,6 @@
27#ifndef _DRM_MODE_H 27#ifndef _DRM_MODE_H
28#define _DRM_MODE_H 28#define _DRM_MODE_H
29 29
30#include <linux/kernel.h>
31#include <linux/types.h>
32
33#define DRM_DISPLAY_INFO_LEN 32 30#define DRM_DISPLAY_INFO_LEN 32
34#define DRM_CONNECTOR_NAME_LEN 32 31#define DRM_CONNECTOR_NAME_LEN 32
35#define DRM_DISPLAY_MODE_LEN 32 32#define DRM_DISPLAY_MODE_LEN 32
@@ -78,6 +75,11 @@
78#define DRM_MODE_DITHERING_OFF 0 75#define DRM_MODE_DITHERING_OFF 0
79#define DRM_MODE_DITHERING_ON 1 76#define DRM_MODE_DITHERING_ON 1
80 77
78/* Dirty info options */
79#define DRM_MODE_DIRTY_OFF 0
80#define DRM_MODE_DIRTY_ON 1
81#define DRM_MODE_DIRTY_ANNOTATE 2
82
81struct drm_mode_modeinfo { 83struct drm_mode_modeinfo {
82 __u32 clock; 84 __u32 clock;
83 __u16 hdisplay, hsync_start, hsync_end, htotal, hskew; 85 __u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
@@ -225,6 +227,45 @@ struct drm_mode_fb_cmd {
225 __u32 handle; 227 __u32 handle;
226}; 228};
227 229
230#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
231#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
232#define DRM_MODE_FB_DIRTY_FLAGS 0x03
233
234/*
235 * Mark a region of a framebuffer as dirty.
236 *
237 * Some hardware does not automatically update display contents
 238 * when hardware or software draws to a framebuffer. This ioctl
239 * allows userspace to tell the kernel and the hardware what
240 * regions of the framebuffer have changed.
241 *
 242 * The kernel or hardware is free to update more than just the
243 * region specified by the clip rects. The kernel or hardware
244 * may also delay and/or coalesce several calls to dirty into a
245 * single update.
246 *
 247 * Userspace may annotate the updates; the annotations are a
248 * promise made by the caller that the change is either a copy
249 * of pixels or a fill of a single color in the region specified.
250 *
251 * If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
 252 * the number of updated regions is half of the num_clips given,
253 * where the clip rects are paired in src and dst. The width and
254 * height of each one of the pairs must match.
255 *
256 * If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given the caller
 257 * promises that the region specified by the clip rects is filled
258 * completely with a single color as given in the color argument.
259 */
260
261struct drm_mode_fb_dirty_cmd {
262 __u32 fb_id;
263 __u32 flags;
264 __u32 color;
265 __u32 num_clips;
266 __u64 clips_ptr;
267};
268
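A hedged userspace sketch of issuing the dirty-fb ioctl just described;
the drmIoctl wrapper and the fb handle are assumptions:

    #include <stdint.h>
    #include <xf86drm.h>    /* drmIoctl, assumed from libdrm */

    /* Sketch: flush one 64x64 region of framebuffer fb_id to the display. */
    static int flush_region(int fd, uint32_t fb_id)
    {
            struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = 64, .y2 = 64 };
            struct drm_mode_fb_dirty_cmd cmd = {
                    .fb_id     = fb_id,
                    .flags     = 0,          /* no copy/fill annotation */
                    .num_clips = 1,
                    .clips_ptr = (uint64_t)(uintptr_t)&clip,
            };

            return drmIoctl(fd, DRM_IOCTL_MODE_DIRTYFB, &cmd);
    }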
228struct drm_mode_mode_cmd { 269struct drm_mode_mode_cmd {
229 __u32 connector_id; 270 __u32 connector_id;
230 struct drm_mode_modeinfo mode; 271 struct drm_mode_modeinfo mode;
@@ -268,4 +309,37 @@ struct drm_mode_crtc_lut {
268 __u64 blue; 309 __u64 blue;
269}; 310};
270 311
312#define DRM_MODE_PAGE_FLIP_EVENT 0x01
313#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
314
315/*
316 * Request a page flip on the specified crtc.
317 *
318 * This ioctl will ask KMS to schedule a page flip for the specified
319 * crtc. Once any pending rendering targeting the specified fb (as of
320 * ioctl time) has completed, the crtc will be reprogrammed to display
321 * that fb after the next vertical refresh. The ioctl returns
322 * immediately, but subsequent rendering to the current fb will block
323 * in the execbuffer ioctl until the page flip happens. If a page
324 * flip is already pending as the ioctl is called, EBUSY will be
325 * returned.
326 *
327 * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
 328 * request that drm send back a vblank event (see drm.h: struct
329 * drm_event_vblank) when the page flip is done. The user_data field
330 * passed in with this ioctl will be returned as the user_data field
331 * in the vblank event struct.
332 *
333 * The reserved field must be zero until we figure out something
334 * clever to use it for.
335 */
336
337struct drm_mode_crtc_page_flip {
338 __u32 crtc_id;
339 __u32 fb_id;
340 __u32 flags;
341 __u32 reserved;
342 __u64 user_data;
343};
344
271#endif 345#endif
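Tying the flip ioctl to the event mechanism in drm.h, a hedged userspace
sketch; the crtc/fb handles and the drmIoctl wrapper are assumptions:

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>    /* drmIoctl, assumed from libdrm */

    /* Sketch: schedule a flip and request a vblank event on completion. */
    static int queue_flip(int fd, uint32_t crtc_id, uint32_t fb_id)
    {
            struct drm_mode_crtc_page_flip flip = {
                    .crtc_id   = crtc_id,
                    .fb_id     = fb_id,
                    .flags     = DRM_MODE_PAGE_FLIP_EVENT,
                    .reserved  = 0,          /* must be zero */
                    .user_data = 0x1234,     /* echoed back in the event */
            };

            if (drmIoctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == -1)
                    return errno == EBUSY ? -EBUSY : -errno;
            /* A struct drm_event_vblank carrying user_data arrives via read(). */
            return 0;
    }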
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 26641e95e0a4..393369147a2d 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -123,5 +123,5 @@ do { \
123 remove_wait_queue(&(queue), &entry); \ 123 remove_wait_queue(&(queue), &entry); \
124} while (0) 124} while (0)
125 125
126#define DRM_WAKEUP( queue ) wake_up_interruptible( queue ) 126#define DRM_WAKEUP( queue ) wake_up( queue )
127#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) 127#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
diff --git a/include/drm/i2c/ch7006.h b/include/drm/i2c/ch7006.h
new file mode 100644
index 000000000000..8390b437a1f8
--- /dev/null
+++ b/include/drm/i2c/ch7006.h
@@ -0,0 +1,86 @@
1/*
2 * Copyright (C) 2009 Francisco Jerez.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining
6 * a copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sublicense, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial
15 * portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef __DRM_I2C_CH7006_H__
28#define __DRM_I2C_CH7006_H__
29
30/**
31 * struct ch7006_encoder_params
32 *
33 * Describes how the ch7006 is wired up with the GPU. It should be
34 * used as the @params parameter of its @set_config method.
35 *
 36 * See "http://www.chrontel.com/pdf/7006.pdf" for the precise
 37 * meaning of these fields.
38 */
39struct ch7006_encoder_params {
40 enum {
41 CH7006_FORMAT_RGB16 = 0,
42 CH7006_FORMAT_YCrCb24m16,
43 CH7006_FORMAT_RGB24m16,
44 CH7006_FORMAT_RGB15,
45 CH7006_FORMAT_RGB24m12C,
46 CH7006_FORMAT_RGB24m12I,
47 CH7006_FORMAT_RGB24m8,
48 CH7006_FORMAT_RGB16m8,
49 CH7006_FORMAT_RGB15m8,
50 CH7006_FORMAT_YCrCb24m8,
51 } input_format;
52
53 enum {
54 CH7006_CLOCK_SLAVE = 0,
55 CH7006_CLOCK_MASTER,
56 } clock_mode;
57
58 enum {
59 CH7006_CLOCK_EDGE_NEG = 0,
60 CH7006_CLOCK_EDGE_POS,
61 } clock_edge;
62
63 int xcm, pcm;
64
65 enum {
66 CH7006_SYNC_SLAVE = 0,
67 CH7006_SYNC_MASTER,
68 } sync_direction;
69
70 enum {
71 CH7006_SYNC_SEPARATED = 0,
72 CH7006_SYNC_EMBEDDED,
73 } sync_encoding;
74
75 enum {
76 CH7006_POUT_1_8V = 0,
77 CH7006_POUT_3_3V,
78 } pout_level;
79
80 enum {
81 CH7006_ACTIVE_HSYNC = 0,
82 CH7006_ACTIVE_DSTART,
83 } active_detect;
84};
85
86#endif
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 7e0cb1da92e6..ec3f5e80a5df 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -27,11 +27,11 @@
27#ifndef _I915_DRM_H_ 27#ifndef _I915_DRM_H_
28#define _I915_DRM_H_ 28#define _I915_DRM_H_
29 29
30#include "drm.h"
31
30/* Please note that modifications to all structs defined here are 32/* Please note that modifications to all structs defined here are
31 * subject to backwards-compatibility constraints. 33 * subject to backwards-compatibility constraints.
32 */ 34 */
33#include <linux/types.h>
34#include "drm.h"
35 35
36/* Each region is a minimum of 16k, and there are at most 255 of them. 36/* Each region is a minimum of 16k, and there are at most 255 of them.
37 */ 37 */
@@ -186,6 +186,8 @@ typedef struct _drm_i915_sarea {
186#define DRM_I915_GEM_MMAP_GTT 0x24 186#define DRM_I915_GEM_MMAP_GTT 0x24
187#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25 187#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
188#define DRM_I915_GEM_MADVISE 0x26 188#define DRM_I915_GEM_MADVISE 0x26
189#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
190#define DRM_I915_OVERLAY_ATTRS 0x28
189 191
190#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 192#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
191#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) 193#define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
@@ -221,8 +223,10 @@ typedef struct _drm_i915_sarea {
221#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 223#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
222#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 224#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
223#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture) 225#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
224#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_intel_get_pipe_from_crtc_id) 226#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
225#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise) 227#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
 228#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
229#define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
226 230
227/* Allow drivers to submit batchbuffers directly to hardware, relying 231/* Allow drivers to submit batchbuffers directly to hardware, relying
228 * on the security mechanisms provided by hardware. 232 * on the security mechanisms provided by hardware.
@@ -266,6 +270,8 @@ typedef struct drm_i915_irq_wait {
266#define I915_PARAM_CHIPSET_ID 4 270#define I915_PARAM_CHIPSET_ID 4
267#define I915_PARAM_HAS_GEM 5 271#define I915_PARAM_HAS_GEM 5
268#define I915_PARAM_NUM_FENCES_AVAIL 6 272#define I915_PARAM_NUM_FENCES_AVAIL 6
273#define I915_PARAM_HAS_OVERLAY 7
274#define I915_PARAM_HAS_PAGEFLIPPING 8
269 275
270typedef struct drm_i915_getparam { 276typedef struct drm_i915_getparam {
271 int param; 277 int param;
@@ -686,4 +692,70 @@ struct drm_i915_gem_madvise {
686 __u32 retained; 692 __u32 retained;
687}; 693};
688 694
695/* flags */
696#define I915_OVERLAY_TYPE_MASK 0xff
697#define I915_OVERLAY_YUV_PLANAR 0x01
698#define I915_OVERLAY_YUV_PACKED 0x02
699#define I915_OVERLAY_RGB 0x03
700
701#define I915_OVERLAY_DEPTH_MASK 0xff00
702#define I915_OVERLAY_RGB24 0x1000
703#define I915_OVERLAY_RGB16 0x2000
704#define I915_OVERLAY_RGB15 0x3000
705#define I915_OVERLAY_YUV422 0x0100
706#define I915_OVERLAY_YUV411 0x0200
707#define I915_OVERLAY_YUV420 0x0300
708#define I915_OVERLAY_YUV410 0x0400
709
710#define I915_OVERLAY_SWAP_MASK 0xff0000
711#define I915_OVERLAY_NO_SWAP 0x000000
712#define I915_OVERLAY_UV_SWAP 0x010000
713#define I915_OVERLAY_Y_SWAP 0x020000
714#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
715
716#define I915_OVERLAY_FLAGS_MASK 0xff000000
717#define I915_OVERLAY_ENABLE 0x01000000
718
719struct drm_intel_overlay_put_image {
720 /* various flags and src format description */
721 __u32 flags;
722 /* source picture description */
723 __u32 bo_handle;
724 /* stride values and offsets are in bytes, buffer relative */
725 __u16 stride_Y; /* stride for packed formats */
726 __u16 stride_UV;
 727	__u32 offset_Y; /* offset for packed formats */
728 __u32 offset_U;
729 __u32 offset_V;
730 /* in pixels */
731 __u16 src_width;
732 __u16 src_height;
733 /* to compensate the scaling factors for partially covered surfaces */
734 __u16 src_scan_width;
735 __u16 src_scan_height;
736 /* output crtc description */
737 __u32 crtc_id;
738 __u16 dst_x;
739 __u16 dst_y;
740 __u16 dst_width;
741 __u16 dst_height;
742};
743
744/* flags */
745#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
746#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
747struct drm_intel_overlay_attrs {
748 __u32 flags;
749 __u32 color_key;
750 __s32 brightness;
751 __u32 contrast;
752 __u32 saturation;
753 __u32 gamma0;
754 __u32 gamma1;
755 __u32 gamma2;
756 __u32 gamma3;
757 __u32 gamma4;
758 __u32 gamma5;
759};
760
689#endif /* _I915_DRM_H_ */ 761#endif /* _I915_DRM_H_ */
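A hedged userspace sketch of the new overlay ioctl; every handle and size
below is made up, and drmIoctl is the assumed libdrm wrapper:

    #include <stdint.h>
    #include <xf86drm.h>

    /* Sketch: display a packed YUV422 GEM buffer through the overlay. */
    static int show_overlay(int fd, uint32_t crtc_id, uint32_t bo_handle)
    {
            struct drm_intel_overlay_put_image img = {
                    .flags = I915_OVERLAY_ENABLE | I915_OVERLAY_YUV_PACKED |
                             I915_OVERLAY_YUV422,
                    .bo_handle = bo_handle,
                    .stride_Y = 2 * 320,            /* 2 bytes/pixel, packed */
                    .src_width = 320,  .src_height = 240,
                    .src_scan_width = 320, .src_scan_height = 240,
                    .crtc_id = crtc_id,
                    .dst_x = 0, .dst_y = 0,
                    .dst_width = 640, .dst_height = 480,
            };

            return drmIoctl(fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &img);
    }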
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 325fd6fb4a42..3ffbc4798afa 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -35,7 +35,7 @@
35#ifndef __MGA_DRM_H__ 35#ifndef __MGA_DRM_H__
36#define __MGA_DRM_H__ 36#define __MGA_DRM_H__
37 37
38#include <linux/types.h> 38#include "drm.h"
39 39
40/* WARNING: If you change any of these defines, make sure to change the 40/* WARNING: If you change any of these defines, make sure to change the
41 * defines in the Xserver file (mga_sarea.h) 41 * defines in the Xserver file (mga_sarea.h)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
new file mode 100644
index 000000000000..1e67c441ea82
--- /dev/null
+++ b/include/drm/nouveau_drm.h
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2005 Stephane Marchesin.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 */
24
25#ifndef __NOUVEAU_DRM_H__
26#define __NOUVEAU_DRM_H__
27
28#define NOUVEAU_DRM_HEADER_PATCHLEVEL 15
29
30struct drm_nouveau_channel_alloc {
31 uint32_t fb_ctxdma_handle;
32 uint32_t tt_ctxdma_handle;
33
34 int channel;
35
36 /* Notifier memory */
37 uint32_t notifier_handle;
38
39 /* DRM-enforced subchannel assignments */
40 struct {
41 uint32_t handle;
42 uint32_t grclass;
43 } subchan[8];
44 uint32_t nr_subchan;
45};
46
47struct drm_nouveau_channel_free {
48 int channel;
49};
50
51struct drm_nouveau_grobj_alloc {
52 int channel;
53 uint32_t handle;
54 int class;
55};
56
57struct drm_nouveau_notifierobj_alloc {
58 uint32_t channel;
59 uint32_t handle;
60 uint32_t size;
61 uint32_t offset;
62};
63
64struct drm_nouveau_gpuobj_free {
65 int channel;
66 uint32_t handle;
67};
68
69/* FIXME : maybe unify {GET,SET}PARAMs */
70#define NOUVEAU_GETPARAM_PCI_VENDOR 3
71#define NOUVEAU_GETPARAM_PCI_DEVICE 4
72#define NOUVEAU_GETPARAM_BUS_TYPE 5
73#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
74#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
75#define NOUVEAU_GETPARAM_FB_SIZE 8
76#define NOUVEAU_GETPARAM_AGP_SIZE 9
77#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
78#define NOUVEAU_GETPARAM_CHIPSET_ID 11
79#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
80struct drm_nouveau_getparam {
81 uint64_t param;
82 uint64_t value;
83};
84
85struct drm_nouveau_setparam {
86 uint64_t param;
87 uint64_t value;
88};
89
90#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
91#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
92#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
93#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
94
95struct drm_nouveau_gem_info {
96 uint32_t handle;
97 uint32_t domain;
98 uint64_t size;
99 uint64_t offset;
100 uint64_t map_handle;
101 uint32_t tile_mode;
102 uint32_t tile_flags;
103};
104
105struct drm_nouveau_gem_new {
106 struct drm_nouveau_gem_info info;
107 uint32_t channel_hint;
108 uint32_t align;
109};
110
111struct drm_nouveau_gem_pushbuf_bo {
112 uint64_t user_priv;
113 uint32_t handle;
114 uint32_t read_domains;
115 uint32_t write_domains;
116 uint32_t valid_domains;
117 uint32_t presumed_ok;
118 uint32_t presumed_domain;
119 uint64_t presumed_offset;
120};
121
122#define NOUVEAU_GEM_RELOC_LOW (1 << 0)
123#define NOUVEAU_GEM_RELOC_HIGH (1 << 1)
124#define NOUVEAU_GEM_RELOC_OR (1 << 2)
125struct drm_nouveau_gem_pushbuf_reloc {
126 uint32_t bo_index;
127 uint32_t reloc_index;
128 uint32_t flags;
129 uint32_t data;
130 uint32_t vor;
131 uint32_t tor;
132};
133
134#define NOUVEAU_GEM_MAX_BUFFERS 1024
135#define NOUVEAU_GEM_MAX_RELOCS 1024
136
137struct drm_nouveau_gem_pushbuf {
138 uint32_t channel;
139 uint32_t nr_dwords;
140 uint32_t nr_buffers;
141 uint32_t nr_relocs;
142 uint64_t dwords;
143 uint64_t buffers;
144 uint64_t relocs;
145};
146
147struct drm_nouveau_gem_pushbuf_call {
148 uint32_t channel;
149 uint32_t handle;
150 uint32_t offset;
151 uint32_t nr_buffers;
152 uint32_t nr_relocs;
153 uint32_t nr_dwords;
154 uint64_t buffers;
155 uint64_t relocs;
156 uint32_t suffix0;
157 uint32_t suffix1;
158 /* below only accessed for CALL2 */
159 uint64_t vram_available;
160 uint64_t gart_available;
161};
162
163struct drm_nouveau_gem_pin {
164 uint32_t handle;
165 uint32_t domain;
166 uint64_t offset;
167};
168
169struct drm_nouveau_gem_unpin {
170 uint32_t handle;
171};
172
173#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
174#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
175#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
176struct drm_nouveau_gem_cpu_prep {
177 uint32_t handle;
178 uint32_t flags;
179};
180
181struct drm_nouveau_gem_cpu_fini {
182 uint32_t handle;
183};
184
185struct drm_nouveau_gem_tile {
186 uint32_t handle;
187 uint32_t offset;
188 uint32_t size;
189 uint32_t tile_mode;
190 uint32_t tile_flags;
191};
192
193enum nouveau_bus_type {
194 NV_AGP = 0,
195 NV_PCI = 1,
196 NV_PCIE = 2,
197};
198
199struct drm_nouveau_sarea {
200};
201
202#define DRM_NOUVEAU_CARD_INIT 0x00
203#define DRM_NOUVEAU_GETPARAM 0x01
204#define DRM_NOUVEAU_SETPARAM 0x02
205#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
206#define DRM_NOUVEAU_CHANNEL_FREE 0x04
207#define DRM_NOUVEAU_GROBJ_ALLOC 0x05
208#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
209#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
210#define DRM_NOUVEAU_GEM_NEW 0x40
211#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
212#define DRM_NOUVEAU_GEM_PUSHBUF_CALL 0x42
213#define DRM_NOUVEAU_GEM_PIN 0x43 /* !KMS only */
214#define DRM_NOUVEAU_GEM_UNPIN 0x44 /* !KMS only */
215#define DRM_NOUVEAU_GEM_CPU_PREP 0x45
216#define DRM_NOUVEAU_GEM_CPU_FINI 0x46
217#define DRM_NOUVEAU_GEM_INFO 0x47
218#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48
219
220#endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 3b9932ab1756..39537f3cf98a 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <linux/types.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 491146170522..81eb9f45883c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,6 +44,29 @@ struct ttm_bo_device;
44 44
45struct drm_mm_node; 45struct drm_mm_node;
46 46
47
48/**
49 * struct ttm_placement
50 *
51 * @fpfn: first valid page frame number to put the object
52 * @lpfn: last valid page frame number to put the object
 53 * @num_placement: number of preferred placements
 54 * @placement: preferred placements
 55 * @num_busy_placement: number of preferred placements when the buffer must be evicted
 56 * @busy_placement: preferred placements when the buffer must be evicted
57 *
58 * Structure indicating the placement you request for an object.
59 */
60struct ttm_placement {
61 unsigned fpfn;
62 unsigned lpfn;
63 unsigned num_placement;
64 const uint32_t *placement;
65 unsigned num_busy_placement;
66 const uint32_t *busy_placement;
67};
68
69
47/** 70/**
48 * struct ttm_mem_reg 71 * struct ttm_mem_reg
49 * 72 *
@@ -109,10 +132,6 @@ struct ttm_tt;
109 * the object is destroyed. 132 * the object is destroyed.
110 * @event_queue: Queue for processes waiting on buffer object status change. 133 * @event_queue: Queue for processes waiting on buffer object status change.
111 * @lock: spinlock protecting mostly synchronization members. 134 * @lock: spinlock protecting mostly synchronization members.
112 * @proposed_placement: Proposed placement for the buffer. Changed only by the
113 * creator prior to validation as opposed to bo->mem.proposed_flags which is
114 * changed by the implementation prior to a buffer move if it wants to outsmart
115 * the buffer creator / user. This latter happens, for example, at eviction.
116 * @mem: structure describing current placement. 135 * @mem: structure describing current placement.
117 * @persistant_swap_storage: Usually the swap storage is deleted for buffers 136 * @persistant_swap_storage: Usually the swap storage is deleted for buffers
118 * pinned in physical memory. If this behaviour is not desired, this member 137 * pinned in physical memory. If this behaviour is not desired, this member
@@ -177,7 +196,6 @@ struct ttm_buffer_object {
177 * Members protected by the bo::reserved lock. 196 * Members protected by the bo::reserved lock.
178 */ 197 */
179 198
180 uint32_t proposed_placement;
181 struct ttm_mem_reg mem; 199 struct ttm_mem_reg mem;
182 struct file *persistant_swap_storage; 200 struct file *persistant_swap_storage;
183 struct ttm_tt *ttm; 201 struct ttm_tt *ttm;
@@ -285,29 +303,30 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
285 * Note: It might be necessary to block validations before the 303 * Note: It might be necessary to block validations before the
286 * wait by reserving the buffer. 304 * wait by reserving the buffer.
287 * Returns -EBUSY if no_wait is true and the buffer is busy. 305 * Returns -EBUSY if no_wait is true and the buffer is busy.
288 * Returns -ERESTART if interrupted by a signal. 306 * Returns -ERESTARTSYS if interrupted by a signal.
289 */ 307 */
290extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy, 308extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
291 bool interruptible, bool no_wait); 309 bool interruptible, bool no_wait);
292/** 310/**
293 * ttm_buffer_object_validate 311 * ttm_bo_validate
294 * 312 *
295 * @bo: The buffer object. 313 * @bo: The buffer object.
296 * @proposed_placement: Proposed_placement for the buffer object. 314 * @placement: Proposed placement for the buffer object.
297 * @interruptible: Sleep interruptible if sleeping. 315 * @interruptible: Sleep interruptible if sleeping.
298 * @no_wait: Return immediately if the buffer is busy. 316 * @no_wait: Return immediately if the buffer is busy.
299 * 317 *
300 * Changes placement and caching policy of the buffer object 318 * Changes placement and caching policy of the buffer object
 301 * according to bo::proposed_flags.			 319 * according to the proposed placement.
302 * Returns 320 * Returns
303 * -EINVAL on invalid proposed_flags. 321 * -EINVAL on invalid proposed placement.
304 * -ENOMEM on out-of-memory condition. 322 * -ENOMEM on out-of-memory condition.
305 * -EBUSY if no_wait is true and buffer busy. 323 * -EBUSY if no_wait is true and buffer busy.
306 * -ERESTART if interrupted by a signal. 324 * -ERESTARTSYS if interrupted by a signal.
307 */ 325 */
308extern int ttm_buffer_object_validate(struct ttm_buffer_object *bo, 326extern int ttm_bo_validate(struct ttm_buffer_object *bo,
309 uint32_t proposed_placement, 327 struct ttm_placement *placement,
310 bool interruptible, bool no_wait); 328 bool interruptible, bool no_wait);
329
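A hedged fragment of the new calling convention; the placement flag
values are stand-ins rather than real TTM_PL_* masks, and bo is assumed
to exist:

    /* Sketch: validate against a preferred and a fallback placement. */
    static const uint32_t preferred[] = { 0x4 /* stand-in: VRAM flag */ };
    static const uint32_t fallback[]  = { 0x2 /* stand-in: system flag */ };

    struct ttm_placement placement = {
            .fpfn = 0, .lpfn = 0,           /* no page-range restriction */
            .num_placement = 1,      .placement = preferred,
            .num_busy_placement = 1, .busy_placement = fallback,
    };

    int ret = ttm_bo_validate(bo, &placement, true /* interruptible */, false);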
311/** 330/**
312 * ttm_bo_unref 331 * ttm_bo_unref
313 * 332 *
@@ -328,7 +347,7 @@ extern void ttm_bo_unref(struct ttm_buffer_object **bo);
328 * waiting for buffer idle. This lock is recursive. 347 * waiting for buffer idle. This lock is recursive.
329 * Returns 348 * Returns
330 * -EBUSY if the buffer is busy and no_wait is true. 349 * -EBUSY if the buffer is busy and no_wait is true.
331 * -ERESTART if interrupted by a signal. 350 * -ERESTARTSYS if interrupted by a signal.
332 */ 351 */
333 352
334extern int 353extern int
@@ -343,7 +362,7 @@ ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
343extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo); 362extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
344 363
345/** 364/**
346 * ttm_buffer_object_init 365 * ttm_bo_init
347 * 366 *
348 * @bdev: Pointer to a ttm_bo_device struct. 367 * @bdev: Pointer to a ttm_bo_device struct.
349 * @bo: Pointer to a ttm_buffer_object to be initialized. 368 * @bo: Pointer to a ttm_buffer_object to be initialized.
@@ -371,20 +390,20 @@ extern void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
371 * Returns 390 * Returns
372 * -ENOMEM: Out of memory. 391 * -ENOMEM: Out of memory.
373 * -EINVAL: Invalid placement flags. 392 * -EINVAL: Invalid placement flags.
374 * -ERESTART: Interrupted by signal while sleeping waiting for resources. 393 * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
375 */ 394 */
376 395
377extern int ttm_buffer_object_init(struct ttm_bo_device *bdev, 396extern int ttm_bo_init(struct ttm_bo_device *bdev,
378 struct ttm_buffer_object *bo, 397 struct ttm_buffer_object *bo,
379 unsigned long size, 398 unsigned long size,
380 enum ttm_bo_type type, 399 enum ttm_bo_type type,
381 uint32_t flags, 400 struct ttm_placement *placement,
382 uint32_t page_alignment, 401 uint32_t page_alignment,
383 unsigned long buffer_start, 402 unsigned long buffer_start,
 384 bool interruptible, 403 bool interruptible,
385 struct file *persistant_swap_storage, 404 struct file *persistant_swap_storage,
386 size_t acc_size, 405 size_t acc_size,
387 void (*destroy) (struct ttm_buffer_object *)); 406 void (*destroy) (struct ttm_buffer_object *));
388/** 407/**
389 * ttm_bo_synccpu_object_init 408 * ttm_bo_synccpu_object_init
390 * 409 *
@@ -405,47 +424,43 @@ extern int ttm_buffer_object_init(struct ttm_bo_device *bdev,
405 * GEM user interface. 424 * GEM user interface.
406 * @p_bo: On successful completion *p_bo points to the created object. 425 * @p_bo: On successful completion *p_bo points to the created object.
407 * 426 *
408 * This function allocates a ttm_buffer_object, and then calls 427 * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
409 * ttm_buffer_object_init on that object. 428 * on that object. The destroy function is set to kfree().
410 * The destroy function is set to kfree().
411 * Returns 429 * Returns
412 * -ENOMEM: Out of memory. 430 * -ENOMEM: Out of memory.
413 * -EINVAL: Invalid placement flags. 431 * -EINVAL: Invalid placement flags.
414 * -ERESTART: Interrupted by signal while waiting for resources. 432 * -ERESTARTSYS: Interrupted by signal while waiting for resources.
415 */ 433 */
416 434
417extern int ttm_buffer_object_create(struct ttm_bo_device *bdev, 435extern int ttm_bo_create(struct ttm_bo_device *bdev,
418 unsigned long size, 436 unsigned long size,
419 enum ttm_bo_type type, 437 enum ttm_bo_type type,
420 uint32_t flags, 438 struct ttm_placement *placement,
421 uint32_t page_alignment, 439 uint32_t page_alignment,
422 unsigned long buffer_start, 440 unsigned long buffer_start,
423 bool interruptible, 441 bool interruptible,
424 struct file *persistant_swap_storage, 442 struct file *persistant_swap_storage,
425 struct ttm_buffer_object **p_bo); 443 struct ttm_buffer_object **p_bo);
426 444
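
A hedged sketch of the new creation path; the kernel-object parameters (zero alignment, no user address, no swap file) are illustrative choices, not mandated by the API.

static int example_create_bo(struct ttm_bo_device *bdev,
			     struct ttm_placement *placement,
			     unsigned long size,
			     struct ttm_buffer_object **p_bo)
{
	/* Kernel-owned object: default alignment, no buffer_start,
	 * uninterruptible, no persistent swap storage. */
	return ttm_bo_create(bdev, size, ttm_bo_type_kernel, placement,
			     0, 0, false, NULL, p_bo);
}
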
427/** 445/**
428 * ttm_bo_check_placement 446 * ttm_bo_check_placement
429 * 447 *
430 * @bo: the buffer object. 448 * @bo: the buffer object.
 431 * @set_flags: placement flags to set. 449 * @placement: Proposed placement for the buffer object.
432 * @clr_flags: placement flags to clear.
433 * 450 *
434 * Performs minimal validity checking on an intended change of 451 * Performs minimal validity checking on an intended change of
435 * placement flags. 452 * placement flags.
436 * Returns 453 * Returns
437 * -EINVAL: Intended change is invalid or not allowed. 454 * -EINVAL: Intended change is invalid or not allowed.
438 */ 455 */
439
440extern int ttm_bo_check_placement(struct ttm_buffer_object *bo, 456extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
441 uint32_t set_flags, uint32_t clr_flags); 457 struct ttm_placement *placement);
442 458
443/** 459/**
444 * ttm_bo_init_mm 460 * ttm_bo_init_mm
445 * 461 *
446 * @bdev: Pointer to a ttm_bo_device struct. 462 * @bdev: Pointer to a ttm_bo_device struct.
447 * @mem_type: The memory type. 463 * @mem_type: The memory type.
448 * @p_offset: offset for managed area in pages.
 449 * @p_size: size of the managed area, in pages. 464 * @p_size: size of the managed area, in pages.
450 * 465 *
451 * Initialize a manager for a given memory type. 466 * Initialize a manager for a given memory type.
@@ -458,7 +473,7 @@ extern int ttm_bo_check_placement(struct ttm_buffer_object *bo,
458 */ 473 */
459 474
460extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, 475extern int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
461 unsigned long p_offset, unsigned long p_size); 476 unsigned long p_size);
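
With the offset parameter gone, a manager is now described by size alone. A hedged sketch, assuming TTM_PL_VRAM from ttm_placement.h and an illustrative byte count supplied by the driver:

static int example_init_vram(struct ttm_bo_device *bdev,
			     unsigned long vram_bytes)
{
	/* The size is given in pages; there is no longer an offset. */
	return ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			      vram_bytes >> PAGE_SHIFT);
}
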
462/** 477/**
463 * ttm_bo_clean_mm 478 * ttm_bo_clean_mm
464 * 479 *
@@ -503,7 +518,7 @@ extern int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
503 * 518 *
504 * Returns: 519 * Returns:
505 * -EINVAL: Invalid or uninitialized memory type. 520 * -EINVAL: Invalid or uninitialized memory type.
506 * -ERESTART: The call was interrupted by a signal while waiting to 521 * -ERESTARTSYS: The call was interrupted by a signal while waiting to
507 * evict a buffer. 522 * evict a buffer.
508 */ 523 */
509 524
@@ -606,7 +621,7 @@ extern int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
606 * be called from the fops::read and fops::write method. 621 * be called from the fops::read and fops::write method.
607 * Returns: 622 * Returns:
608 * See man (2) write, man(2) read. In particular, 623 * See man (2) write, man(2) read. In particular,
609 * the function may return -EINTR if 624 * the function may return -ERESTARTSYS if
610 * interrupted by a signal. 625 * interrupted by a signal.
611 */ 626 */
612 627
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e8cd6d20aed2..ff7664e0c3cd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -242,12 +242,6 @@ struct ttm_mem_type_manager {
242/** 242/**
243 * struct ttm_bo_driver 243 * struct ttm_bo_driver
244 * 244 *
245 * @mem_type_prio: Priority array of memory types to place a buffer object in
246 * if it fits without evicting buffers from any of these memory types.
247 * @mem_busy_prio: Priority array of memory types to place a buffer object in
248 * if it needs to evict buffers to make room.
249 * @num_mem_type_prio: Number of elements in the @mem_type_prio array.
250 * @num_mem_busy_prio: Number of elements in the @num_mem_busy_prio array.
251 * @create_ttm_backend_entry: Callback to create a struct ttm_backend. 245 * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
252 * @invalidate_caches: Callback to invalidate read caches when a buffer object 246 * @invalidate_caches: Callback to invalidate read caches when a buffer object
253 * has been evicted. 247 * has been evicted.
@@ -265,11 +259,6 @@ struct ttm_mem_type_manager {
265 */ 259 */
266 260
267struct ttm_bo_driver { 261struct ttm_bo_driver {
268 const uint32_t *mem_type_prio;
269 const uint32_t *mem_busy_prio;
270 uint32_t num_mem_type_prio;
271 uint32_t num_mem_busy_prio;
272
273 /** 262 /**
274 * struct ttm_bo_driver member create_ttm_backend_entry 263 * struct ttm_bo_driver member create_ttm_backend_entry
275 * 264 *
@@ -306,7 +295,8 @@ struct ttm_bo_driver {
306 * finished, they'll end up in bo->mem.flags 295 * finished, they'll end up in bo->mem.flags
307 */ 296 */
308 297
309 uint32_t(*evict_flags) (struct ttm_buffer_object *bo); 298 void(*evict_flags) (struct ttm_buffer_object *bo,
299 struct ttm_placement *placement);
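
Under the new signature a driver no longer returns a flag word; it fills in a ttm_placement describing where evicted buffers may go. A hedged sketch of such a hook (flag names from ttm_placement.h; the single-entry, system-memory placement is an illustrative choice):

static uint32_t example_evict_flags_word =
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;

static void example_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	placement->fpfn = 0;
	placement->lpfn = 0;
	placement->num_placement = 1;
	placement->placement = &example_evict_flags_word;
	placement->num_busy_placement = 1;
	placement->busy_placement = &example_evict_flags_word;
}
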
310 /** 300 /**
311 * struct ttm_bo_driver member move: 301 * struct ttm_bo_driver member move:
312 * 302 *
@@ -545,6 +535,15 @@ extern int ttm_tt_set_user(struct ttm_tt *ttm,
545extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem); 535extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
546 536
547/** 537/**
538 * ttm_tt_populate:
539 *
540 * @ttm: The struct ttm_tt to contain the backing pages.
541 *
542 * Add backing pages to all of @ttm
543 */
544extern int ttm_tt_populate(struct ttm_tt *ttm);
545
546/**
548 * ttm_ttm_destroy: 547 * ttm_ttm_destroy:
549 * 548 *
550 * @ttm: The struct ttm_tt. 549 * @ttm: The struct ttm_tt.
@@ -639,12 +638,12 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
639 * -EBUSY: No space available (only if no_wait == 1). 638 * -EBUSY: No space available (only if no_wait == 1).
640 * -ENOMEM: Could not allocate memory for the buffer object, either due to 639 * -ENOMEM: Could not allocate memory for the buffer object, either due to
641 * fragmentation or concurrent allocators. 640 * fragmentation or concurrent allocators.
642 * -ERESTART: An interruptible sleep was interrupted by a signal. 641 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
643 */ 642 */
644extern int ttm_bo_mem_space(struct ttm_buffer_object *bo, 643extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
645 uint32_t proposed_placement, 644 struct ttm_placement *placement,
646 struct ttm_mem_reg *mem, 645 struct ttm_mem_reg *mem,
647 bool interruptible, bool no_wait); 646 bool interruptible, bool no_wait);
648/** 647/**
649 * ttm_bo_wait_for_cpu 648 * ttm_bo_wait_for_cpu
650 * 649 *
@@ -654,7 +653,7 @@ extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
654 * Wait until a buffer object is no longer sync'ed for CPU access. 653 * Wait until a buffer object is no longer sync'ed for CPU access.
655 * Returns: 654 * Returns:
656 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1). 655 * -EBUSY: Buffer object was sync'ed for CPU access. (only if no_wait == 1).
657 * -ERESTART: An interruptible sleep was interrupted by a signal. 656 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
658 */ 657 */
659 658
660extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait); 659extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
@@ -758,7 +757,7 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
758 * -EAGAIN: The reservation may cause a deadlock. 757 * -EAGAIN: The reservation may cause a deadlock.
759 * Release all buffer reservations, wait for @bo to become unreserved and 758 * Release all buffer reservations, wait for @bo to become unreserved and
760 * try again. (only if use_sequence == 1). 759 * try again. (only if use_sequence == 1).
761 * -ERESTART: A wait for the buffer to become unreserved was interrupted by 760 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
762 * a signal. Release all buffer reservations and return to user-space. 761 * a signal. Release all buffer reservations and return to user-space.
763 */ 762 */
764extern int ttm_bo_reserve(struct ttm_buffer_object *bo, 763extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
@@ -799,7 +798,7 @@ extern int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo,
799 * 798 *
800 * Returns: 799 * Returns:
801 * -EBUSY: If no_wait == 1 and the buffer is already reserved. 800 * -EBUSY: If no_wait == 1 and the buffer is already reserved.
802 * -ERESTART: If interruptible == 1 and the process received a signal 801 * -ERESTARTSYS: If interruptible == 1 and the process received a signal
803 * while sleeping. 802 * while sleeping.
804 */ 803 */
805extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo, 804extern int ttm_bo_block_reservation(struct ttm_buffer_object *bo,
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
new file mode 100644
index 000000000000..cd2c475da9ea
--- /dev/null
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -0,0 +1,107 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31#ifndef _TTM_EXECBUF_UTIL_H_
32#define _TTM_EXECBUF_UTIL_H_
33
34#include "ttm/ttm_bo_api.h"
35#include <linux/list.h>
36
37/**
38 * struct ttm_validate_buffer
39 *
40 * @head: list head for thread-private list.
41 * @bo: refcounted buffer object pointer.
 42 * @new_sync_obj_arg: New sync_obj_arg for @bo, to be used when
 43 * adding a new sync object.
 44 * @reserved: Indicates whether @bo has been reserved for validation.
45 */
46
47struct ttm_validate_buffer {
48 struct list_head head;
49 struct ttm_buffer_object *bo;
50 void *new_sync_obj_arg;
51 bool reserved;
52};
53
54/**
55 * function ttm_eu_backoff_reservation
56 *
57 * @list: thread private list of ttm_validate_buffer structs.
58 *
59 * Undoes all buffer validation reservations for bos pointed to by
60 * the list entries.
61 */
62
63extern void ttm_eu_backoff_reservation(struct list_head *list);
64
65/**
66 * function ttm_eu_reserve_buffers
67 *
68 * @list: thread private list of ttm_validate_buffer structs.
69 * @val_seq: A unique sequence number.
70 *
71 * Tries to reserve bos pointed to by the list entries for validation.
72 * If the function returns 0, all buffers are marked as "unfenced",
73 * taken off the lru lists and are not synced for write CPU usage.
74 *
75 * If the function detects a deadlock due to multiple threads trying to
76 * reserve the same buffers in reverse order, all threads except one will
77 * back off and retry. This function may sleep while waiting for
78 * CPU write reservations to be cleared, and for other threads to
79 * unreserve their buffers.
80 *
 81 * This function may return -ERESTARTSYS or -EAGAIN if the calling process
82 * receives a signal while waiting. In that case, no buffers on the list
83 * will be reserved upon return.
84 *
85 * Buffers reserved by this function should be unreserved by
86 * a call to either ttm_eu_backoff_reservation() or
87 * ttm_eu_fence_buffer_objects() when command submission is complete or
88 * has failed.
89 */
90
91extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
92
93/**
94 * function ttm_eu_fence_buffer_objects.
95 *
96 * @list: thread private list of ttm_validate_buffer structs.
97 * @sync_obj: The new sync object for the buffers.
98 *
99 * This function should be called when command submission is complete, and
100 * it will add a new sync object to bos pointed to by entries on @list.
101 * It also unreserves all buffers, putting them on lru lists.
102 *
103 */
104
105extern void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj);
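
Taken together, the three helpers above suggest the following submission flow. This is a hedged sketch, not code from the patch; example_emit_commands() and the fence pointer stand in for driver-specific pieces.

static int example_submit(struct list_head *val_list, uint32_t val_seq,
			  void *fence)
{
	int ret;

	ret = ttm_eu_reserve_buffers(val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;	/* nothing on the list is left reserved */

	ret = example_emit_commands(val_list);	/* assumed driver helper */
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(val_list);
		return ret;
	}

	/* Attach the fence and unreserve; buffers return to the LRU. */
	ttm_eu_fence_buffer_objects(val_list, fence);
	return 0;
}
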
106
107#endif
diff --git a/include/drm/ttm/ttm_lock.h b/include/drm/ttm/ttm_lock.h
new file mode 100644
index 000000000000..81ba0b0b891a
--- /dev/null
+++ b/include/drm/ttm/ttm_lock.h
@@ -0,0 +1,247 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30
31/** @file ttm_lock.h
32 * This file implements a simple replacement for the buffer manager use
33 * of the DRM heavyweight hardware lock.
34 * The lock is a read-write lock. Taking it in read mode and write mode
35 * is relatively fast, and intended for in-kernel use only.
36 *
37 * The vt mode is used only when there is a need to block all
38 * user-space processes from validating buffers.
39 * It's allowed to leave kernel space with the vt lock held.
40 * If a user-space process dies while having the vt-lock,
41 * it will be released during the file descriptor release. The vt lock
42 * excludes write lock and read lock.
43 *
44 * The suspend mode is used to lock out all TTM users when preparing for
45 * and executing suspend operations.
46 *
47 */
48
49#ifndef _TTM_LOCK_H_
50#define _TTM_LOCK_H_
51
52#include "ttm/ttm_object.h"
53#include <linux/wait.h>
54#include <asm/atomic.h>
55
56/**
57 * struct ttm_lock
58 *
59 * @base: ttm base object used solely to release the lock if the client
60 * holding the lock dies.
61 * @queue: Queue for processes waiting for lock change-of-status.
62 * @lock: Spinlock protecting some lock members.
63 * @rw: Read-write lock counter. Protected by @lock.
64 * @flags: Lock state. Protected by @lock.
 65 * @kill_takers: Boolean indicating whether to kill takers of the lock.
66 * @signal: Signal to send when kill_takers is true.
67 */
68
69struct ttm_lock {
70 struct ttm_base_object base;
71 wait_queue_head_t queue;
72 spinlock_t lock;
73 int32_t rw;
74 uint32_t flags;
75 bool kill_takers;
76 int signal;
77 struct ttm_object_file *vt_holder;
78};
79
80
81/**
82 * ttm_lock_init
83 *
84 * @lock: Pointer to a struct ttm_lock
85 * Initializes the lock.
86 */
87extern void ttm_lock_init(struct ttm_lock *lock);
88
89/**
90 * ttm_read_unlock
91 *
92 * @lock: Pointer to a struct ttm_lock
93 *
94 * Releases a read lock.
95 */
96extern void ttm_read_unlock(struct ttm_lock *lock);
97
98/**
99 * ttm_read_lock
100 *
101 * @lock: Pointer to a struct ttm_lock
102 * @interruptible: Interruptible sleeping while waiting for a lock.
103 *
104 * Takes the lock in read mode.
105 * Returns:
106 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
107 */
108extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
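
A hedged sketch of the expected use: an ioctl path takes the lock in read mode around buffer validation so that the vt and suspend modes can exclude it.

static int example_ioctl_body(struct ttm_lock *lock)
{
	int ret;

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTARTSYS: signal while waiting */

	/* ... validate and submit buffers here ... */

	ttm_read_unlock(lock);
	return 0;
}
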
109
110/**
111 * ttm_read_trylock
112 *
113 * @lock: Pointer to a struct ttm_lock
114 * @interruptible: Interruptible sleeping while waiting for a lock.
115 *
116 * Tries to take the lock in read mode. If the lock is already held
117 * in write mode, the function will return -EBUSY. If the lock is held
118 * in vt or suspend mode, the function will sleep until these modes
119 * are unlocked.
120 *
121 * Returns:
122 * -EBUSY The lock was already held in write mode.
123 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
124 */
125extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
126
127/**
128 * ttm_write_unlock
129 *
130 * @lock: Pointer to a struct ttm_lock
131 *
132 * Releases a write lock.
133 */
134extern void ttm_write_unlock(struct ttm_lock *lock);
135
136/**
137 * ttm_write_lock
138 *
139 * @lock: Pointer to a struct ttm_lock
140 * @interruptible: Interruptible sleeping while waiting for a lock.
141 *
142 * Takes the lock in write mode.
143 * Returns:
144 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
145 */
146extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
147
148/**
149 * ttm_lock_downgrade
150 *
151 * @lock: Pointer to a struct ttm_lock
152 *
153 * Downgrades a write lock to a read lock.
154 */
155extern void ttm_lock_downgrade(struct ttm_lock *lock);
156
157/**
158 * ttm_suspend_lock
159 *
160 * @lock: Pointer to a struct ttm_lock
161 *
162 * Takes the lock in suspend mode. Excludes read and write mode.
163 */
164extern void ttm_suspend_lock(struct ttm_lock *lock);
165
166/**
167 * ttm_suspend_unlock
168 *
169 * @lock: Pointer to a struct ttm_lock
170 *
 171 * Releases a suspend lock.
172 */
173extern void ttm_suspend_unlock(struct ttm_lock *lock);
174
175/**
176 * ttm_vt_lock
177 *
178 * @lock: Pointer to a struct ttm_lock
179 * @interruptible: Interruptible sleeping while waiting for a lock.
180 * @tfile: Pointer to a struct ttm_object_file to register the lock with.
181 *
182 * Takes the lock in vt mode.
183 * Returns:
184 * -ERESTARTSYS If interrupted by a signal and interruptible is true.
185 * -ENOMEM: Out of memory when locking.
186 */
187extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
188 struct ttm_object_file *tfile);
189
190/**
191 * ttm_vt_unlock
192 *
193 * @lock: Pointer to a struct ttm_lock
194 *
195 * Releases a vt lock.
196 * Returns:
197 * -EINVAL If the lock was not held.
198 */
199extern int ttm_vt_unlock(struct ttm_lock *lock);
200
222/**
223 * ttm_lock_set_kill
224 *
225 * @lock: Pointer to a struct ttm_lock
 226 * @val: Boolean indicating whether to kill processes taking the lock.
227 * @signal: Signal to send to the process taking the lock.
228 *
229 * The kill-when-taking-lock functionality is used to kill processes that keep
 230 * on using the TTM functionality when its resources have been taken down, for
231 * example when the X server exits. A typical sequence would look like this:
232 * - X server takes lock in write mode.
233 * - ttm_lock_set_kill() is called with @val set to true.
234 * - As part of X server exit, TTM resources are taken down.
235 * - X server releases the lock on file release.
236 * - Another dri client wants to render, takes the lock and is killed.
237 *
238 */
239static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
240 int signal)
241{
242 lock->kill_takers = val;
243 if (val)
244 lock->signal = signal;
245}
246
247#endif
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 6983a7cf4da4..b199170b3c2c 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -33,6 +33,7 @@
33#include <linux/wait.h> 33#include <linux/wait.h>
34#include <linux/errno.h> 34#include <linux/errno.h>
35#include <linux/kobject.h> 35#include <linux/kobject.h>
36#include <linux/mm.h>
36 37
37/** 38/**
38 * struct ttm_mem_shrink - callback to shrink TTM memory usage. 39 * struct ttm_mem_shrink - callback to shrink TTM memory usage.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
new file mode 100644
index 000000000000..703ca4db0a29
--- /dev/null
+++ b/include/drm/ttm/ttm_object.h
@@ -0,0 +1,267 @@
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/** @file ttm_object.h
31 *
32 * Base- and reference object implementation for the various
33 * ttm objects. Implements reference counting, minimal security checks
34 * and release on file close.
35 */
36
37#ifndef _TTM_OBJECT_H_
38#define _TTM_OBJECT_H_
39
40#include <linux/list.h>
41#include "drm_hashtab.h"
42#include <linux/kref.h>
43#include <ttm/ttm_memory.h>
44
45/**
46 * enum ttm_ref_type
47 *
48 * Describes what type of reference a ref object holds.
49 *
50 * TTM_REF_USAGE is a simple refcount on a base object.
51 *
52 * TTM_REF_SYNCCPU_READ is a SYNCCPU_READ reference on a
53 * buffer object.
54 *
55 * TTM_REF_SYNCCPU_WRITE is a SYNCCPU_WRITE reference on a
56 * buffer object.
57 *
58 */
59
60enum ttm_ref_type {
61 TTM_REF_USAGE,
62 TTM_REF_SYNCCPU_READ,
63 TTM_REF_SYNCCPU_WRITE,
64 TTM_REF_NUM
65};
66
67/**
68 * enum ttm_object_type
69 *
70 * One entry per ttm object type.
71 * Device-specific types should use the
72 * ttm_driver_typex types.
73 */
74
75enum ttm_object_type {
76 ttm_fence_type,
77 ttm_buffer_type,
78 ttm_lock_type,
79 ttm_driver_type0 = 256,
80 ttm_driver_type1
81};
82
83struct ttm_object_file;
84struct ttm_object_device;
85
86/**
87 * struct ttm_base_object
88 *
89 * @hash: hash entry for the per-device object hash.
90 * @type: derived type this object is base class for.
91 * @shareable: Other ttm_object_files can access this object.
92 *
93 * @tfile: Pointer to ttm_object_file of the creator.
94 * NULL if the object was not created by a user request.
95 * (kernel object).
96 *
97 * @refcount: Number of references to this object, not
98 * including the hash entry. A reference to a base object can
99 * only be held by a ref object.
100 *
101 * @refcount_release: A function to be called when there are
102 * no more references to this object. This function should
103 * destroy the object (or make sure destruction eventually happens),
104 * and when it is called, the object has
105 * already been taken out of the per-device hash. The parameter
106 * "base" should be set to NULL by the function.
107 *
108 * @ref_obj_release: A function to be called when a reference object
109 * with another ttm_ref_type than TTM_REF_USAGE is deleted.
110 * this function may, for example, release a lock held by a user-space
111 * process.
112 *
113 * This struct is intended to be used as a base struct for objects that
114 * are visible to user-space. It provides a global name, race-safe
 115 * access and refcounting, minimal access control and hooks for unref actions.
116 */
117
118struct ttm_base_object {
119 struct drm_hash_item hash;
120 enum ttm_object_type object_type;
121 bool shareable;
122 struct ttm_object_file *tfile;
123 struct kref refcount;
124 void (*refcount_release) (struct ttm_base_object **base);
125 void (*ref_obj_release) (struct ttm_base_object *base,
126 enum ttm_ref_type ref_type);
127};
128
129/**
130 * ttm_base_object_init
131 *
132 * @tfile: Pointer to a struct ttm_object_file.
133 * @base: The struct ttm_base_object to initialize.
 134 * @shareable: This object is shareable with other applications.
135 * (different @tfile pointers.)
136 * @type: The object type.
137 * @refcount_release: See the struct ttm_base_object description.
138 * @ref_obj_release: See the struct ttm_base_object description.
139 *
140 * Initializes a struct ttm_base_object.
141 */
142
143extern int ttm_base_object_init(struct ttm_object_file *tfile,
144 struct ttm_base_object *base,
145 bool shareable,
146 enum ttm_object_type type,
147 void (*refcount_release) (struct ttm_base_object
148 **),
149 void (*ref_obj_release) (struct ttm_base_object
150 *,
151 enum ttm_ref_type
152 ref_type));
153
154/**
155 * ttm_base_object_lookup
156 *
157 * @tfile: Pointer to a struct ttm_object_file.
158 * @key: Hash key
159 *
160 * Looks up a struct ttm_base_object with the key @key.
161 * Also verifies that the object is visible to the application, by
162 * comparing the @tfile argument and checking the object shareable flag.
163 */
164
165extern struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file
166 *tfile, uint32_t key);
167
168/**
169 * ttm_base_object_unref
170 *
 171 * @p_base: Pointer to a pointer referencing a struct ttm_base_object.
172 *
173 * Decrements the base object refcount and clears the pointer pointed to by
174 * p_base.
175 */
176
177extern void ttm_base_object_unref(struct ttm_base_object **p_base);
178
179/**
180 * ttm_ref_object_add.
181 *
182 * @tfile: A struct ttm_object_file representing the application owning the
183 * ref_object.
184 * @base: The base object to reference.
185 * @ref_type: The type of reference.
186 * @existed: Upon completion, indicates that an identical reference object
187 * already existed, and the refcount was upped on that object instead.
188 *
189 * Adding a ref object to a base object is basically like referencing the
190 * base object, but a user-space application holds the reference. When the
191 * file corresponding to @tfile is closed, all its reference objects are
192 * deleted. A reference object can have different types depending on what
 193 * it's intended for. It can be a refcount to prevent object destruction;
 194 * when user-space takes a lock, it can add a ref object to that lock to
195 * make sure the lock is released if the application dies. A ref object
196 * will hold a single reference on a base object.
197 */
198extern int ttm_ref_object_add(struct ttm_object_file *tfile,
199 struct ttm_base_object *base,
200 enum ttm_ref_type ref_type, bool *existed);
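
A hedged sketch combining lookup and referencing: resolving a user-space handle to a base object and giving the calling file a TTM_REF_USAGE reference on it. The handle-validation policy is illustrative.

static int example_ref_handle(struct ttm_object_file *tfile, uint32_t handle)
{
	struct ttm_base_object *base;
	bool existed;
	int ret;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;	/* unknown or not visible to tfile */

	ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, &existed);
	ttm_base_object_unref(&base);	/* drop the lookup reference */
	return ret;
}
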
201/**
202 * ttm_ref_object_base_unref
203 *
204 * @key: Key representing the base object.
205 * @ref_type: Ref type of the ref object to be dereferenced.
206 *
207 * Unreference a ref object with type @ref_type
208 * on the base object identified by @key. If there are no duplicate
209 * references, the ref object will be destroyed and the base object
210 * will be unreferenced.
211 */
212extern int ttm_ref_object_base_unref(struct ttm_object_file *tfile,
213 unsigned long key,
214 enum ttm_ref_type ref_type);
215
216/**
217 * ttm_object_file_init - initialize a struct ttm_object file
218 *
219 * @tdev: A struct ttm_object device this file is initialized on.
220 * @hash_order: Order of the hash table used to hold the reference objects.
221 *
222 * This is typically called by the file_ops::open function.
223 */
224
225extern struct ttm_object_file *ttm_object_file_init(struct ttm_object_device
226 *tdev,
227 unsigned int hash_order);
228
229/**
230 * ttm_object_file_release - release data held by a ttm_object_file
231 *
232 * @p_tfile: Pointer to pointer to the ttm_object_file object to release.
233 * *p_tfile will be set to NULL by this function.
234 *
235 * Releases all data associated by a ttm_object_file.
236 * Typically called from file_ops::release. The caller must
237 * ensure that there are no concurrent users of tfile.
238 */
239
240extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
241
242/**
 243 * ttm_object_device_init - initialize a struct ttm_object_device
244 *
245 * @hash_order: Order of hash table used to hash the base objects.
246 *
247 * This function is typically called on device initialization to prepare
248 * data structures needed for ttm base and ref objects.
249 */
250
251extern struct ttm_object_device *ttm_object_device_init
252 (struct ttm_mem_global *mem_glob, unsigned int hash_order);
253
254/**
255 * ttm_object_device_release - release data held by a ttm_object_device
256 *
257 * @p_tdev: Pointer to pointer to the ttm_object_device object to release.
258 * *p_tdev will be set to NULL by this function.
259 *
260 * Releases all data associated by a ttm_object_device.
261 * Typically called from driver::unload before the destruction of the
262 * device private data structure.
263 */
264
265extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
266
267#endif
diff --git a/include/drm/via_drm.h b/include/drm/via_drm.h
index 170786e5c2ff..fd11a5bd892d 100644
--- a/include/drm/via_drm.h
+++ b/include/drm/via_drm.h
@@ -24,7 +24,7 @@
24#ifndef _VIA_DRM_H_ 24#ifndef _VIA_DRM_H_
25#define _VIA_DRM_H_ 25#define _VIA_DRM_H_
26 26
27#include <linux/types.h> 27#include "drm.h"
28 28
29/* WARNING: These defines must be the same as what the Xserver uses. 29/* WARNING: These defines must be the same as what the Xserver uses.
30 * if you change them, you must change the defines in the Xserver. 30 * if you change them, you must change the defines in the Xserver.
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ce520402e840..3fe02cf8b65a 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -151,6 +151,8 @@ struct nilfs_super_root {
151#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ 151#define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */
152#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order 152#define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order
153 semantics also for data */ 153 semantics also for data */
154#define NILFS_MOUNT_NORECOVERY 0x4000 /* Disable write access during
155 mount-time recovery */
154 156
155 157
156/** 158/**
@@ -403,6 +405,28 @@ struct nilfs_segment_summary {
403#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */ 405#define NILFS_SS_GC 0x0010 /* segment written for cleaner operation */
404 406
405/** 407/**
408 * struct nilfs_btree_node - B-tree node
409 * @bn_flags: flags
410 * @bn_level: level
411 * @bn_nchildren: number of children
412 * @bn_pad: padding
413 */
414struct nilfs_btree_node {
415 __u8 bn_flags;
416 __u8 bn_level;
417 __le16 bn_nchildren;
418 __le32 bn_pad;
419};
420
421/* flags */
422#define NILFS_BTREE_NODE_ROOT 0x01
423
424/* level */
425#define NILFS_BTREE_LEVEL_DATA 0
426#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
427#define NILFS_BTREE_LEVEL_MAX 14
428
429/**
406 * struct nilfs_palloc_group_desc - block group descriptor 430 * struct nilfs_palloc_group_desc - block group descriptor
407 * @pg_nfrees: number of free entries in block group 431 * @pg_nfrees: number of free entries in block group
408 */ 432 */
diff --git a/include/linux/omapfb.h b/include/linux/omapfb.h
new file mode 100644
index 000000000000..f46c40ac6d45
--- /dev/null
+++ b/include/linux/omapfb.h
@@ -0,0 +1,251 @@
1/*
2 * File: include/linux/omapfb.h
3 *
4 * Framebuffer driver for TI OMAP boards
5 *
6 * Copyright (C) 2004 Nokia Corporation
7 * Author: Imre Deak <imre.deak@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, write to the Free Software Foundation, Inc.,
21 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef __LINUX_OMAPFB_H__
25#define __LINUX_OMAPFB_H__
26
27#include <linux/fb.h>
28#include <linux/ioctl.h>
29#include <linux/types.h>
30
31/* IOCTL commands. */
32
33#define OMAP_IOW(num, dtype) _IOW('O', num, dtype)
34#define OMAP_IOR(num, dtype) _IOR('O', num, dtype)
35#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype)
36#define OMAP_IO(num) _IO('O', num)
37
38#define OMAPFB_MIRROR OMAP_IOW(31, int)
39#define OMAPFB_SYNC_GFX OMAP_IO(37)
40#define OMAPFB_VSYNC OMAP_IO(38)
41#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, int)
42#define OMAPFB_GET_CAPS OMAP_IOR(42, struct omapfb_caps)
43#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, int)
44#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
45#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
46#define OMAPFB_UPDATE_WINDOW_OLD OMAP_IOW(47, struct omapfb_update_window_old)
47#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key)
48#define OMAPFB_GET_COLOR_KEY OMAP_IOW(51, struct omapfb_color_key)
49#define OMAPFB_SETUP_PLANE OMAP_IOW(52, struct omapfb_plane_info)
50#define OMAPFB_QUERY_PLANE OMAP_IOW(53, struct omapfb_plane_info)
51#define OMAPFB_UPDATE_WINDOW OMAP_IOW(54, struct omapfb_update_window)
52#define OMAPFB_SETUP_MEM OMAP_IOW(55, struct omapfb_mem_info)
53#define OMAPFB_QUERY_MEM OMAP_IOW(56, struct omapfb_mem_info)
54#define OMAPFB_WAITFORVSYNC OMAP_IO(57)
55#define OMAPFB_MEMORY_READ OMAP_IOR(58, struct omapfb_memory_read)
56#define OMAPFB_GET_OVERLAY_COLORMODE OMAP_IOR(59, struct omapfb_ovl_colormode)
57#define OMAPFB_WAITFORGO OMAP_IO(60)
58#define OMAPFB_GET_VRAM_INFO OMAP_IOR(61, struct omapfb_vram_info)
59#define OMAPFB_SET_TEARSYNC OMAP_IOW(62, struct omapfb_tearsync_info)
60
61#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
62#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
63#define OMAPFB_CAPS_PANEL_MASK 0xff000000
64
65#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000
66#define OMAPFB_CAPS_TEARSYNC 0x00002000
67#define OMAPFB_CAPS_PLANE_RELOCATE_MEM 0x00004000
68#define OMAPFB_CAPS_PLANE_SCALE 0x00008000
69#define OMAPFB_CAPS_WINDOW_PIXEL_DOUBLE 0x00010000
70#define OMAPFB_CAPS_WINDOW_SCALE 0x00020000
71#define OMAPFB_CAPS_WINDOW_OVERLAY 0x00040000
72#define OMAPFB_CAPS_WINDOW_ROTATE 0x00080000
73#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000
74
75/* Values from DSP must map to lower 16-bits */
76#define OMAPFB_FORMAT_MASK 0x00ff
77#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100
78#define OMAPFB_FORMAT_FLAG_TEARSYNC 0x0200
79#define OMAPFB_FORMAT_FLAG_FORCE_VSYNC 0x0400
80#define OMAPFB_FORMAT_FLAG_ENABLE_OVERLAY 0x0800
81#define OMAPFB_FORMAT_FLAG_DISABLE_OVERLAY 0x1000
82
83#define OMAPFB_MEMTYPE_SDRAM 0
84#define OMAPFB_MEMTYPE_SRAM 1
85#define OMAPFB_MEMTYPE_MAX 1
86
87enum omapfb_color_format {
88 OMAPFB_COLOR_RGB565 = 0,
89 OMAPFB_COLOR_YUV422,
90 OMAPFB_COLOR_YUV420,
91 OMAPFB_COLOR_CLUT_8BPP,
92 OMAPFB_COLOR_CLUT_4BPP,
93 OMAPFB_COLOR_CLUT_2BPP,
94 OMAPFB_COLOR_CLUT_1BPP,
95 OMAPFB_COLOR_RGB444,
96 OMAPFB_COLOR_YUY422,
97
98 OMAPFB_COLOR_ARGB16,
99 OMAPFB_COLOR_RGB24U, /* RGB24, 32-bit container */
100 OMAPFB_COLOR_RGB24P, /* RGB24, 24-bit container */
101 OMAPFB_COLOR_ARGB32,
102 OMAPFB_COLOR_RGBA32,
103 OMAPFB_COLOR_RGBX32,
104};
105
106struct omapfb_update_window {
107 __u32 x, y;
108 __u32 width, height;
109 __u32 format;
110 __u32 out_x, out_y;
111 __u32 out_width, out_height;
112 __u32 reserved[8];
113};
114
115struct omapfb_update_window_old {
116 __u32 x, y;
117 __u32 width, height;
118 __u32 format;
119};
120
121enum omapfb_plane {
122 OMAPFB_PLANE_GFX = 0,
123 OMAPFB_PLANE_VID1,
124 OMAPFB_PLANE_VID2,
125};
126
127enum omapfb_channel_out {
128 OMAPFB_CHANNEL_OUT_LCD = 0,
129 OMAPFB_CHANNEL_OUT_DIGIT,
130};
131
132struct omapfb_plane_info {
133 __u32 pos_x;
134 __u32 pos_y;
135 __u8 enabled;
136 __u8 channel_out;
137 __u8 mirror;
138 __u8 reserved1;
139 __u32 out_width;
140 __u32 out_height;
141 __u32 reserved2[12];
142};
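
A hedged user-space sketch of the plane ioctls defined above: query the current plane setup, then re-enable the plane at the origin. The device node and geometry are illustrative.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/omapfb.h>

static int example_enable_plane(const char *fbdev)
{
	struct omapfb_plane_info pi;
	int fd = open(fbdev, O_RDWR);	/* e.g. "/dev/fb1" */

	if (fd < 0)
		return -1;
	if (ioctl(fd, OMAPFB_QUERY_PLANE, &pi) == 0) {
		pi.enabled = 1;
		pi.pos_x = 0;
		pi.pos_y = 0;
		ioctl(fd, OMAPFB_SETUP_PLANE, &pi);
	}
	close(fd);
	return 0;
}
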
143
144struct omapfb_mem_info {
145 __u32 size;
146 __u8 type;
147 __u8 reserved[3];
148};
149
150struct omapfb_caps {
151 __u32 ctrl;
152 __u32 plane_color;
153 __u32 wnd_color;
154};
155
156enum omapfb_color_key_type {
157 OMAPFB_COLOR_KEY_DISABLED = 0,
158 OMAPFB_COLOR_KEY_GFX_DST,
159 OMAPFB_COLOR_KEY_VID_SRC,
160};
161
162struct omapfb_color_key {
163 __u8 channel_out;
164 __u32 background;
165 __u32 trans_key;
166 __u8 key_type;
167};
168
169enum omapfb_update_mode {
170 OMAPFB_UPDATE_DISABLED = 0,
171 OMAPFB_AUTO_UPDATE,
172 OMAPFB_MANUAL_UPDATE
173};
174
175struct omapfb_memory_read {
176 __u16 x;
177 __u16 y;
178 __u16 w;
179 __u16 h;
180 size_t buffer_size;
181 void __user *buffer;
182};
183
184struct omapfb_ovl_colormode {
185 __u8 overlay_idx;
186 __u8 mode_idx;
187 __u32 bits_per_pixel;
188 __u32 nonstd;
189 struct fb_bitfield red;
190 struct fb_bitfield green;
191 struct fb_bitfield blue;
192 struct fb_bitfield transp;
193};
194
195struct omapfb_vram_info {
196 __u32 total;
197 __u32 free;
198 __u32 largest_free_block;
199 __u32 reserved[5];
200};
201
202struct omapfb_tearsync_info {
203 __u8 enabled;
204 __u8 reserved1[3];
205 __u16 line;
206 __u16 reserved2;
207};
208
209#ifdef __KERNEL__
210
211#include <plat/board.h>
212
213#ifdef CONFIG_ARCH_OMAP1
214#define OMAPFB_PLANE_NUM 1
215#else
216#define OMAPFB_PLANE_NUM 3
217#endif
218
219struct omapfb_mem_region {
220 u32 paddr;
221 void __iomem *vaddr;
222 unsigned long size;
223 u8 type; /* OMAPFB_PLANE_MEM_* */
224 enum omapfb_color_format format;/* OMAPFB_COLOR_* */
225 unsigned format_used:1; /* Must be set when format is set.
 226 * Needed because of the badly chosen 0
227 * base for OMAPFB_COLOR_* values
228 */
229 unsigned alloc:1; /* allocated by the driver */
230 unsigned map:1; /* kernel mapped by the driver */
231};
232
233struct omapfb_mem_desc {
234 int region_cnt;
235 struct omapfb_mem_region region[OMAPFB_PLANE_NUM];
236};
237
238struct omapfb_platform_data {
239 struct omap_lcd_config lcd;
240 struct omapfb_mem_desc mem_desc;
241 void *ctrl_platform_data;
242};
243
244/* in arch/arm/plat-omap/fb.c */
245extern void omapfb_set_platform_data(struct omapfb_platform_data *data);
246extern void omapfb_set_ctrl_platform_data(void *pdata);
247extern void omapfb_reserve_sdram(void);
248
249#endif
250
 251#endif /* __LINUX_OMAPFB_H__ */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f5c7cd343e56..04771b9c3316 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -218,6 +218,7 @@ struct pci_dev {
218 unsigned int class; /* 3 bytes: (base,sub,prog-if) */ 218 unsigned int class; /* 3 bytes: (base,sub,prog-if) */
219 u8 revision; /* PCI revision, low byte of class word */ 219 u8 revision; /* PCI revision, low byte of class word */
220 u8 hdr_type; /* PCI header type (`multi' flag masked out) */ 220 u8 hdr_type; /* PCI header type (`multi' flag masked out) */
221 u8 pcie_cap; /* PCI-E capability offset */
221 u8 pcie_type; /* PCI-E device/port type */ 222 u8 pcie_type; /* PCI-E device/port type */
222 u8 rom_base_reg; /* which config register controls the ROM */ 223 u8 rom_base_reg; /* which config register controls the ROM */
223 u8 pin; /* which interrupt pin this device uses */ 224 u8 pin; /* which interrupt pin this device uses */
@@ -280,6 +281,7 @@ struct pci_dev {
280 unsigned int is_virtfn:1; 281 unsigned int is_virtfn:1;
281 unsigned int reset_fn:1; 282 unsigned int reset_fn:1;
282 unsigned int is_hotplug_bridge:1; 283 unsigned int is_hotplug_bridge:1;
284 unsigned int aer_firmware_first:1;
283 pci_dev_flags_t dev_flags; 285 pci_dev_flags_t dev_flags;
284 atomic_t enable_cnt; /* pci_enable_device has been called */ 286 atomic_t enable_cnt; /* pci_enable_device has been called */
285 287
@@ -635,7 +637,13 @@ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device,
635 unsigned int ss_vendor, unsigned int ss_device, 637 unsigned int ss_vendor, unsigned int ss_device,
636 struct pci_dev *from); 638 struct pci_dev *from);
637struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); 639struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
638struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn); 640struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
641 unsigned int devfn);
642static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus,
643 unsigned int devfn)
644{
645 return pci_get_domain_bus_and_slot(0, bus, devfn);
646}
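
A hedged sketch of the domain-aware lookup; the location is illustrative, and the caller owns a reference that must be dropped with pci_dev_put().

static struct pci_dev *example_find_device(void)
{
	/* Illustrative location: domain 1, bus 0, slot 2, function 0. */
	struct pci_dev *pdev =
		pci_get_domain_bus_and_slot(1, 0, PCI_DEVFN(2, 0));

	/* NULL if absent; otherwise release with pci_dev_put() when done. */
	return pdev;
}
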
639struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from); 647struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
640int pci_dev_present(const struct pci_device_id *ids); 648int pci_dev_present(const struct pci_device_id *ids);
641 649
@@ -701,6 +709,7 @@ void pci_disable_device(struct pci_dev *dev);
701void pci_set_master(struct pci_dev *dev); 709void pci_set_master(struct pci_dev *dev);
702void pci_clear_master(struct pci_dev *dev); 710void pci_clear_master(struct pci_dev *dev);
703int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); 711int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state);
712int pci_set_cacheline_size(struct pci_dev *dev);
704#define HAVE_PCI_SET_MWI 713#define HAVE_PCI_SET_MWI
705int __must_check pci_set_mwi(struct pci_dev *dev); 714int __must_check pci_set_mwi(struct pci_dev *dev);
706int pci_try_set_mwi(struct pci_dev *dev); 715int pci_try_set_mwi(struct pci_dev *dev);
@@ -1246,6 +1255,8 @@ extern int pci_pci_problems;
1246 1255
1247extern unsigned long pci_cardbus_io_size; 1256extern unsigned long pci_cardbus_io_size;
1248extern unsigned long pci_cardbus_mem_size; 1257extern unsigned long pci_cardbus_mem_size;
1258extern u8 pci_dfl_cache_line_size;
1259extern u8 pci_cache_line_size;
1249 1260
1250extern unsigned long pci_hotplug_io_size; 1261extern unsigned long pci_hotplug_io_size;
1251extern unsigned long pci_hotplug_mem_size; 1262extern unsigned long pci_hotplug_mem_size;
@@ -1290,5 +1301,34 @@ extern void pci_hp_create_module_link(struct pci_slot *pci_slot);
1290extern void pci_hp_remove_module_link(struct pci_slot *pci_slot); 1301extern void pci_hp_remove_module_link(struct pci_slot *pci_slot);
1291#endif 1302#endif
1292 1303
1304/**
1305 * pci_pcie_cap - get the saved PCIe capability offset
1306 * @dev: PCI device
1307 *
 1308 * The PCIe capability offset is calculated at PCI device initialization
 1309 * time and saved in the data structure. This function returns the saved
 1310 * PCIe capability offset. Using it instead of pci_find_capability()
 1311 * avoids an unnecessary search of the PCI configuration space. If you
 1312 * need to calculate the PCIe capability offset from the raw device for
 1313 * some reason, use pci_find_capability() instead.
1314 */
1315static inline int pci_pcie_cap(struct pci_dev *dev)
1316{
1317 return dev->pcie_cap;
1318}
1319
1320/**
1321 * pci_is_pcie - check if the PCI device is PCI Express capable
1322 * @dev: PCI device
1323 *
 1324 * Return true if the PCI device is PCI Express capable, false otherwise.
1325 */
1326static inline bool pci_is_pcie(struct pci_dev *dev)
1327{
1328 return !!pci_pcie_cap(dev);
1329}
1330
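
A hedged sketch of the intended use of the two helpers: reading the Link Capabilities register through the cached offset instead of re-walking the capability list.

static u32 example_read_lnkcap(struct pci_dev *dev)
{
	u32 lnkcap = 0;

	/* pci_pcie_cap() returns the cached capability offset. */
	if (pci_is_pcie(dev))
		pci_read_config_dword(dev,
				      pci_pcie_cap(dev) + PCI_EXP_LNKCAP,
				      &lnkcap);
	return lnkcap;
}
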
1331void pci_request_acs(void);
1332
1293#endif /* __KERNEL__ */ 1333#endif /* __KERNEL__ */
1294#endif /* LINUX_PCI_H */ 1334#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_regs.h b/include/linux/pci_regs.h
index dd0bed4f1cf0..9f2ad0aa3c39 100644
--- a/include/linux/pci_regs.h
+++ b/include/linux/pci_regs.h
@@ -365,6 +365,11 @@
365#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */ 365#define PCI_X_STATUS_266MHZ 0x40000000 /* 266 MHz capable */
366#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */ 366#define PCI_X_STATUS_533MHZ 0x80000000 /* 533 MHz capable */
367 367
368/* PCI Bridge Subsystem ID registers */
369
370#define PCI_SSVID_VENDOR_ID 4 /* PCI-Bridge subsystem vendor id register */
371#define PCI_SSVID_DEVICE_ID 6 /* PCI-Bridge subsystem device id register */
372
368/* PCI Express capability registers */ 373/* PCI Express capability registers */
369 374
370#define PCI_EXP_FLAGS 2 /* Capabilities register */ 375#define PCI_EXP_FLAGS 2 /* Capabilities register */
@@ -502,6 +507,7 @@
502#define PCI_EXT_CAP_ID_VC 2 507#define PCI_EXT_CAP_ID_VC 2
503#define PCI_EXT_CAP_ID_DSN 3 508#define PCI_EXT_CAP_ID_DSN 3
504#define PCI_EXT_CAP_ID_PWR 4 509#define PCI_EXT_CAP_ID_PWR 4
510#define PCI_EXT_CAP_ID_ACS 13
505#define PCI_EXT_CAP_ID_ARI 14 511#define PCI_EXT_CAP_ID_ARI 14
506#define PCI_EXT_CAP_ID_ATS 15 512#define PCI_EXT_CAP_ID_ATS 15
507#define PCI_EXT_CAP_ID_SRIOV 16 513#define PCI_EXT_CAP_ID_SRIOV 16
@@ -662,4 +668,16 @@
662#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */ 668#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
663#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */ 669#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
664 670
671/* Access Control Service */
672#define PCI_ACS_CAP 0x04 /* ACS Capability Register */
673#define PCI_ACS_SV 0x01 /* Source Validation */
674#define PCI_ACS_TB 0x02 /* Translation Blocking */
675#define PCI_ACS_RR 0x04 /* P2P Request Redirect */
676#define PCI_ACS_CR 0x08 /* P2P Completion Redirect */
677#define PCI_ACS_UF 0x10 /* Upstream Forwarding */
678#define PCI_ACS_EC 0x20 /* P2P Egress Control */
679#define PCI_ACS_DT 0x40 /* Direct Translated P2P */
680#define PCI_ACS_CTRL 0x06 /* ACS Control Register */
681#define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */
682
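
A hedged sketch of driving these registers: locating the ACS extended capability and enabling source validation and request redirect. Which bits a caller should set is policy and is illustrative here.

static void example_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;		/* device has no ACS capability */

	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
	ctrl |= PCI_ACS_SV | PCI_ACS_RR;
	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
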
665#endif /* LINUX_PCI_REGS_H */ 683#endif /* LINUX_PCI_REGS_H */
diff --git a/include/linux/pcieport_if.h b/include/linux/pcieport_if.h
index b4c79545330b..6775532b92a9 100644
--- a/include/linux/pcieport_if.h
+++ b/include/linux/pcieport_if.h
@@ -10,10 +10,7 @@
10#define _PCIEPORT_IF_H_ 10#define _PCIEPORT_IF_H_
11 11
12/* Port Type */ 12/* Port Type */
13#define PCIE_RC_PORT 4 /* Root port of RC */ 13#define PCIE_ANY_PORT (~0)
14#define PCIE_SW_UPSTREAM_PORT 5 /* Upstream port of Switch */
15#define PCIE_SW_DOWNSTREAM_PORT 6 /* Downstream port of Switch */
16#define PCIE_ANY_PORT 7
17 14
18/* Service Type */ 15/* Service Type */
19#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */ 16#define PCIE_PORT_SERVICE_PME_SHIFT 0 /* Power Management Event */
@@ -25,17 +22,6 @@
25#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */ 22#define PCIE_PORT_SERVICE_VC_SHIFT 3 /* Virtual Channel */
26#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT) 23#define PCIE_PORT_SERVICE_VC (1 << PCIE_PORT_SERVICE_VC_SHIFT)
27 24
28/* Root/Upstream/Downstream Port's Interrupt Mode */
29#define PCIE_PORT_NO_IRQ (-1)
30#define PCIE_PORT_INTx_MODE 0
31#define PCIE_PORT_MSI_MODE 1
32#define PCIE_PORT_MSIX_MODE 2
33
34struct pcie_port_data {
35 int port_type; /* Type of the port */
36 int port_irq_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
37};
38
39struct pcie_device { 25struct pcie_device {
40 int irq; /* Service IRQ/MSI/MSI-X Vector */ 26 int irq; /* Service IRQ/MSI/MSI-X Vector */
41 struct pci_dev *port; /* Root/Upstream/Downstream Port */ 27 struct pci_dev *port; /* Root/Upstream/Downstream Port */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index bc70c5810fec..939a61507ac5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -834,4 +834,8 @@ int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
834asmlinkage long sys_perf_event_open( 834asmlinkage long sys_perf_event_open(
835 struct perf_event_attr __user *attr_uptr, 835 struct perf_event_attr __user *attr_uptr,
836 pid_t pid, int cpu, int group_fd, unsigned long flags); 836 pid_t pid, int cpu, int group_fd, unsigned long flags);
837
838asmlinkage long sys_mmap_pgoff(unsigned long addr, unsigned long len,
839 unsigned long prot, unsigned long flags,
840 unsigned long fd, unsigned long pgoff);
837#endif 841#endif
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a34fa89f1474..e101a2d04d75 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -331,6 +331,7 @@ struct usb_bus {
331 u8 otg_port; /* 0, or number of OTG/HNP port */ 331 u8 otg_port; /* 0, or number of OTG/HNP port */
332 unsigned is_b_host:1; /* true during some HNP roleswitches */ 332 unsigned is_b_host:1; /* true during some HNP roleswitches */
333 unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */ 333 unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
334 unsigned sg_tablesize; /* 0 or largest number of sg list entries */
334 335
335 int devnum_next; /* Next open device number in 336 int devnum_next; /* Next open device number in
336 * round-robin allocation */ 337 * round-robin allocation */
@@ -428,11 +429,9 @@ struct usb_tt;
428 * @last_busy: time of last use 429 * @last_busy: time of last use
429 * @autosuspend_delay: in jiffies 430 * @autosuspend_delay: in jiffies
430 * @connect_time: time device was first connected 431 * @connect_time: time device was first connected
431 * @auto_pm: autosuspend/resume in progress
432 * @do_remote_wakeup: remote wakeup should be enabled 432 * @do_remote_wakeup: remote wakeup should be enabled
433 * @reset_resume: needs reset instead of resume 433 * @reset_resume: needs reset instead of resume
434 * @autosuspend_disabled: autosuspend disabled by the user 434 * @autosuspend_disabled: autosuspend disabled by the user
435 * @autoresume_disabled: autoresume disabled by the user
436 * @skip_sys_resume: skip the next system resume 435 * @skip_sys_resume: skip the next system resume
437 * @wusb_dev: if this is a Wireless USB device, link to the WUSB 436 * @wusb_dev: if this is a Wireless USB device, link to the WUSB
438 * specific data for the device. 437 * specific data for the device.
@@ -513,11 +512,9 @@ struct usb_device {
513 int autosuspend_delay; 512 int autosuspend_delay;
514 unsigned long connect_time; 513 unsigned long connect_time;
515 514
516 unsigned auto_pm:1;
517 unsigned do_remote_wakeup:1; 515 unsigned do_remote_wakeup:1;
518 unsigned reset_resume:1; 516 unsigned reset_resume:1;
519 unsigned autosuspend_disabled:1; 517 unsigned autosuspend_disabled:1;
520 unsigned autoresume_disabled:1;
521 unsigned skip_sys_resume:1; 518 unsigned skip_sys_resume:1;
522#endif 519#endif
523 struct wusb_dev *wusb_dev; 520 struct wusb_dev *wusb_dev;
@@ -543,22 +540,20 @@ extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
543 540
544/* USB autosuspend and autoresume */ 541/* USB autosuspend and autoresume */
545#ifdef CONFIG_USB_SUSPEND 542#ifdef CONFIG_USB_SUSPEND
546extern int usb_autopm_set_interface(struct usb_interface *intf);
547extern int usb_autopm_get_interface(struct usb_interface *intf); 543extern int usb_autopm_get_interface(struct usb_interface *intf);
548extern void usb_autopm_put_interface(struct usb_interface *intf); 544extern void usb_autopm_put_interface(struct usb_interface *intf);
549extern int usb_autopm_get_interface_async(struct usb_interface *intf); 545extern int usb_autopm_get_interface_async(struct usb_interface *intf);
550extern void usb_autopm_put_interface_async(struct usb_interface *intf); 546extern void usb_autopm_put_interface_async(struct usb_interface *intf);
551 547
552static inline void usb_autopm_enable(struct usb_interface *intf) 548static inline void usb_autopm_get_interface_no_resume(
549 struct usb_interface *intf)
553{ 550{
554 atomic_set(&intf->pm_usage_cnt, 0); 551 atomic_inc(&intf->pm_usage_cnt);
555 usb_autopm_set_interface(intf);
556} 552}
557 553static inline void usb_autopm_put_interface_no_suspend(
558static inline void usb_autopm_disable(struct usb_interface *intf) 554 struct usb_interface *intf)
559{ 555{
560 atomic_set(&intf->pm_usage_cnt, 1); 556 atomic_dec(&intf->pm_usage_cnt);
561 usb_autopm_set_interface(intf);
562} 557}
563 558
564static inline void usb_mark_last_busy(struct usb_device *udev) 559static inline void usb_mark_last_busy(struct usb_device *udev)
@@ -568,12 +563,8 @@ static inline void usb_mark_last_busy(struct usb_device *udev)
568 563
569#else 564#else
570 565
571static inline int usb_autopm_set_interface(struct usb_interface *intf)
572{ return 0; }
573
574static inline int usb_autopm_get_interface(struct usb_interface *intf) 566static inline int usb_autopm_get_interface(struct usb_interface *intf)
575{ return 0; } 567{ return 0; }
576
577static inline int usb_autopm_get_interface_async(struct usb_interface *intf) 568static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
578{ return 0; } 569{ return 0; }
579 570
@@ -581,9 +572,11 @@ static inline void usb_autopm_put_interface(struct usb_interface *intf)
581{ } 572{ }
582static inline void usb_autopm_put_interface_async(struct usb_interface *intf) 573static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
583{ } 574{ }
584static inline void usb_autopm_enable(struct usb_interface *intf) 575static inline void usb_autopm_get_interface_no_resume(
576 struct usb_interface *intf)
585{ } 577{ }
586static inline void usb_autopm_disable(struct usb_interface *intf) 578static inline void usb_autopm_put_interface_no_suspend(
579 struct usb_interface *intf)
587{ } 580{ }
588static inline void usb_mark_last_busy(struct usb_device *udev) 581static inline void usb_mark_last_busy(struct usb_device *udev)
589{ } 582{ }
@@ -626,6 +619,10 @@ extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
626 unsigned ifnum); 619 unsigned ifnum);
627extern struct usb_host_interface *usb_altnum_to_altsetting( 620extern struct usb_host_interface *usb_altnum_to_altsetting(
628 const struct usb_interface *intf, unsigned int altnum); 621 const struct usb_interface *intf, unsigned int altnum);
622extern struct usb_host_interface *usb_find_alt_setting(
623 struct usb_host_config *config,
624 unsigned int iface_num,
625 unsigned int alt_num);
629 626
630 627
631/** 628/**
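
With pm_usage_cnt now manipulated atomically and the old enable/disable helpers replaced by the no_resume/no_suspend pair, the usage pattern for drivers remains the usual get/put bracket around I/O. A minimal sketch under that assumption (mydrv_do_io is a hypothetical driver function, not part of this patch):

#include <linux/usb.h>

/* Hold a PM reference across I/O so the device cannot autosuspend
 * mid-transfer, then refresh the idle timer and drop the reference
 * (which may trigger autosuspend once the delay expires).
 */
static int mydrv_do_io(struct usb_interface *intf)
{
	int rc;

	rc = usb_autopm_get_interface(intf);	/* resumes if needed; can sleep */
	if (rc)
		return rc;

	/* ... submit and wait for URBs here ... */

	usb_mark_last_busy(interface_to_usbdev(intf));
	usb_autopm_put_interface(intf);
	return 0;
}
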
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4f6bb3d2160e..738ea1a691cb 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -127,6 +127,7 @@ struct usb_function {
127 /* private: */ 127 /* private: */
128 /* internals */ 128 /* internals */
129 struct list_head list; 129 struct list_head list;
130 DECLARE_BITMAP(endpoints, 32);
130}; 131};
131 132
132int usb_add_function(struct usb_configuration *, struct usb_function *); 133int usb_add_function(struct usb_configuration *, struct usb_function *);
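
The new per-function bitmap gives the composite core one bit per endpoint address. A sketch of how such a bitmap can be queried with the standard bitmap helpers; the helper itself is hypothetical, and since the field sits in the private section of struct usb_function, only core code would touch it:

#include <linux/bitops.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>

/* Hypothetical core-side check: has this function claimed the
 * endpoint with the given address?
 */
static bool function_claims_ep(struct usb_function *f, u8 ep_addr)
{
	return test_bit(ep_addr & USB_ENDPOINT_NUMBER_MASK, f->endpoints);
}
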
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h
index 2443c0e7a80c..52bb917641f0 100644
--- a/include/linux/usb/otg.h
+++ b/include/linux/usb/otg.h
@@ -33,6 +33,23 @@ enum usb_otg_state {
33 OTG_STATE_A_VBUS_ERR, 33 OTG_STATE_A_VBUS_ERR,
34}; 34};
35 35
36#define USB_OTG_PULLUP_ID (1 << 0)
37#define USB_OTG_PULLDOWN_DP (1 << 1)
38#define USB_OTG_PULLDOWN_DM (1 << 2)
39#define USB_OTG_EXT_VBUS_INDICATOR (1 << 3)
40#define USB_OTG_DRV_VBUS (1 << 4)
41#define USB_OTG_DRV_VBUS_EXT (1 << 5)
42
43struct otg_transceiver;
44
 45/* for transceivers connected through a ULPI interface, the user must
 46 * provide access ops
 47 */
48struct otg_io_access_ops {
49 int (*read)(struct otg_transceiver *otg, u32 reg);
50 int (*write)(struct otg_transceiver *otg, u32 val, u32 reg);
51};
52
36/* 53/*
37 * the otg driver needs to interact with both device side and host side 54 * the otg driver needs to interact with both device side and host side
38 * usb controllers. it decides which controller is active at a given 55 * usb controllers. it decides which controller is active at a given
@@ -42,6 +59,7 @@ enum usb_otg_state {
42struct otg_transceiver { 59struct otg_transceiver {
43 struct device *dev; 60 struct device *dev;
44 const char *label; 61 const char *label;
62 unsigned int flags;
45 63
46 u8 default_a; 64 u8 default_a;
47 enum usb_otg_state state; 65 enum usb_otg_state state;
@@ -49,10 +67,17 @@ struct otg_transceiver {
49 struct usb_bus *host; 67 struct usb_bus *host;
50 struct usb_gadget *gadget; 68 struct usb_gadget *gadget;
51 69
70 struct otg_io_access_ops *io_ops;
71 void __iomem *io_priv;
72
52 /* to pass extra port status to the root hub */ 73 /* to pass extra port status to the root hub */
53 u16 port_status; 74 u16 port_status;
54 u16 port_change; 75 u16 port_change;
55 76
77 /* initialize/shutdown the OTG controller */
78 int (*init)(struct otg_transceiver *otg);
79 void (*shutdown)(struct otg_transceiver *otg);
80
56 /* bind/unbind the host controller */ 81 /* bind/unbind the host controller */
57 int (*set_host)(struct otg_transceiver *otg, 82 int (*set_host)(struct otg_transceiver *otg,
58 struct usb_bus *host); 83 struct usb_bus *host);
@@ -65,6 +90,10 @@ struct otg_transceiver {
65 int (*set_power)(struct otg_transceiver *otg, 90 int (*set_power)(struct otg_transceiver *otg,
66 unsigned mA); 91 unsigned mA);
67 92
93 /* effective for A-peripheral, ignored for B devices */
94 int (*set_vbus)(struct otg_transceiver *otg,
95 bool enabled);
96
68 /* for non-OTG B devices: set transceiver into suspend mode */ 97 /* for non-OTG B devices: set transceiver into suspend mode */
69 int (*set_suspend)(struct otg_transceiver *otg, 98 int (*set_suspend)(struct otg_transceiver *otg,
70 int suspend); 99 int suspend);
@@ -85,6 +114,38 @@ extern int otg_set_transceiver(struct otg_transceiver *);
85extern void usb_nop_xceiv_register(void); 114extern void usb_nop_xceiv_register(void);
86extern void usb_nop_xceiv_unregister(void); 115extern void usb_nop_xceiv_unregister(void);
87 116
 117/* helpers for direct access through the low-level I/O interface */
118static inline int otg_io_read(struct otg_transceiver *otg, u32 reg)
119{
120 if (otg->io_ops && otg->io_ops->read)
121 return otg->io_ops->read(otg, reg);
122
123 return -EINVAL;
124}
125
126static inline int otg_io_write(struct otg_transceiver *otg, u32 reg, u32 val)
127{
128 if (otg->io_ops && otg->io_ops->write)
129 return otg->io_ops->write(otg, reg, val);
130
131 return -EINVAL;
132}
133
134static inline int
135otg_init(struct otg_transceiver *otg)
136{
137 if (otg->init)
138 return otg->init(otg);
139
140 return 0;
141}
142
143static inline void
144otg_shutdown(struct otg_transceiver *otg)
145{
146 if (otg->shutdown)
147 otg->shutdown(otg);
148}
88 149
89/* for usb host and peripheral controller drivers */ 150/* for usb host and peripheral controller drivers */
90extern struct otg_transceiver *otg_get_transceiver(void); 151extern struct otg_transceiver *otg_get_transceiver(void);
@@ -97,6 +158,12 @@ otg_start_hnp(struct otg_transceiver *otg)
97 return otg->start_hnp(otg); 158 return otg->start_hnp(otg);
98} 159}
99 160
161/* Context: can sleep */
162static inline int
163otg_set_vbus(struct otg_transceiver *otg, bool enabled)
164{
165 return otg->set_vbus(otg, enabled);
166}
100 167
101/* for HCDs */ 168/* for HCDs */
102static inline int 169static inline int
@@ -105,7 +172,6 @@ otg_set_host(struct otg_transceiver *otg, struct usb_bus *host)
105 return otg->set_host(otg, host); 172 return otg->set_host(otg, host);
106} 173}
107 174
108
109/* for usb peripheral controller drivers */ 175/* for usb peripheral controller drivers */
110 176
111/* Context: can sleep */ 177/* Context: can sleep */
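
A minimal sketch of the new otg_io_access_ops, assuming a flat memory-mapped ULPI viewport where each ULPI register appears at a fixed MMIO offset; real controllers usually encode the ULPI address in a command register instead, so the 1:1 mapping here is illustrative only:

#include <linux/io.h>
#include <linux/usb/otg.h>

static int my_ulpi_read(struct otg_transceiver *otg, u32 reg)
{
	return readl(otg->io_priv + reg);
}

static int my_ulpi_write(struct otg_transceiver *otg, u32 val, u32 reg)
{
	writel(val, otg->io_priv + reg);
	return 0;
}

static struct otg_io_access_ops my_ulpi_ops = {
	.read	= my_ulpi_read,
	.write	= my_ulpi_write,
};
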
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
new file mode 100644
index 000000000000..20675c6ebc4d
--- /dev/null
+++ b/include/linux/usb/ulpi.h
@@ -0,0 +1,7 @@
1#ifndef __LINUX_USB_ULPI_H
2#define __LINUX_USB_ULPI_H
3
4struct otg_transceiver *otg_ulpi_create(struct otg_io_access_ops *ops,
5 unsigned int flags);
6
7#endif /* __LINUX_USB_ULPI_H */
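
Hypothetical board glue tying the two pieces together: wrap the accessors above in a transceiver via otg_ulpi_create() and register it with the OTG core. MY_ULPI_BASE and the flag combination are placeholders for illustration:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>

#define MY_ULPI_BASE	0x50000000	/* made-up physical address */

static int __init my_board_otg_init(void)
{
	struct otg_transceiver *otg;

	otg = otg_ulpi_create(&my_ulpi_ops,
			      USB_OTG_DRV_VBUS | USB_OTG_EXT_VBUS_INDICATOR);
	if (!otg)
		return -ENOMEM;

	otg->io_priv = ioremap(MY_ULPI_BASE, 4096);
	if (!otg->io_priv)
		return -ENOMEM;

	return otg_set_transceiver(otg);
}
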
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 3d15fb9bc116..a4b947e470a5 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -56,7 +56,9 @@
56 US_FLAG(SANE_SENSE, 0x00008000) \ 56 US_FLAG(SANE_SENSE, 0x00008000) \
57 /* Sane Sense (> 18 bytes) */ \ 57 /* Sane Sense (> 18 bytes) */ \
58 US_FLAG(CAPACITY_OK, 0x00010000) \ 58 US_FLAG(CAPACITY_OK, 0x00010000) \
59 /* READ CAPACITY response is correct */ 59 /* READ CAPACITY response is correct */ \
60 US_FLAG(BAD_SENSE, 0x00020000) \
61 /* Bad Sense (never more than 18 bytes) */
60 62
61#define US_FLAG(name, value) US_FL_##name = value , 63#define US_FLAG(name, value) US_FL_##name = value ,
62enum { US_DO_ALL_FLAGS }; 64enum { US_DO_ALL_FLAGS };
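
US_FLAG() is an X-macro: US_DO_ALL_FLAGS expands to the whole list, and redefining US_FLAG changes what each entry becomes. A hypothetical second expansion that builds a name table, which would automatically pick up the new BAD_SENSE flag:

#include <linux/usb_usual.h>

#define US_FLAG(name, value) { #name, value },
static const struct { const char *name; unsigned int flag; } us_flags[] = {
	US_DO_ALL_FLAGS
};
#undef US_FLAG
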
diff --git a/include/xen/xen.h b/include/xen/xen.h
new file mode 100644
index 000000000000..a16402418d31
--- /dev/null
+++ b/include/xen/xen.h
@@ -0,0 +1,32 @@
1#ifndef _XEN_XEN_H
2#define _XEN_XEN_H
3
4enum xen_domain_type {
5 XEN_NATIVE, /* running on bare hardware */
6 XEN_PV_DOMAIN, /* running in a PV domain */
7 XEN_HVM_DOMAIN, /* running in a Xen hvm domain */
8};
9
10#ifdef CONFIG_XEN
11extern enum xen_domain_type xen_domain_type;
12#else
13#define xen_domain_type XEN_NATIVE
14#endif
15
16#define xen_domain() (xen_domain_type != XEN_NATIVE)
17#define xen_pv_domain() (xen_domain() && \
18 xen_domain_type == XEN_PV_DOMAIN)
19#define xen_hvm_domain() (xen_domain() && \
20 xen_domain_type == XEN_HVM_DOMAIN)
21
22#ifdef CONFIG_XEN_DOM0
23#include <xen/interface/xen.h>
24#include <asm/xen/hypervisor.h>
25
26#define xen_initial_domain() (xen_pv_domain() && \
27 xen_start_info->flags & SIF_INITDOMAIN)
28#else /* !CONFIG_XEN_DOM0 */
29#define xen_initial_domain() (0)
30#endif /* CONFIG_XEN_DOM0 */
31
32#endif /* _XEN_XEN_H */
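
Because xen_domain_type is #defined to XEN_NATIVE when CONFIG_XEN is off, xen_domain() folds to 0 at compile time and callers can test the domain type without ifdefs. A sketch (the probe function is hypothetical):

#include <linux/errno.h>
#include <xen/xen.h>

static int my_platform_quirk_init(void)
{
	if (xen_pv_domain())
		return -ENODEV;	/* no direct hardware access in a PV guest */
	/* ... bare-metal or HVM setup ... */
	return 0;
}
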
diff --git a/ipc/shm.c b/ipc/shm.c
index 464694e0aa4a..11bec626c228 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -290,28 +290,28 @@ static unsigned long shm_get_unmapped_area(struct file *file,
290 unsigned long flags) 290 unsigned long flags)
291{ 291{
292 struct shm_file_data *sfd = shm_file_data(file); 292 struct shm_file_data *sfd = shm_file_data(file);
293 return get_unmapped_area(sfd->file, addr, len, pgoff, flags); 293 return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
294} 294 pgoff, flags);
295
296int is_file_shm_hugepages(struct file *file)
297{
298 int ret = 0;
299
300 if (file->f_op == &shm_file_operations) {
301 struct shm_file_data *sfd;
302 sfd = shm_file_data(file);
303 ret = is_file_hugepages(sfd->file);
304 }
305 return ret;
306} 295}
307 296
308static const struct file_operations shm_file_operations = { 297static const struct file_operations shm_file_operations = {
309 .mmap = shm_mmap, 298 .mmap = shm_mmap,
310 .fsync = shm_fsync, 299 .fsync = shm_fsync,
311 .release = shm_release, 300 .release = shm_release,
301};
302
303static const struct file_operations shm_file_operations_huge = {
304 .mmap = shm_mmap,
305 .fsync = shm_fsync,
306 .release = shm_release,
312 .get_unmapped_area = shm_get_unmapped_area, 307 .get_unmapped_area = shm_get_unmapped_area,
313}; 308};
314 309
310int is_file_shm_hugepages(struct file *file)
311{
312 return file->f_op == &shm_file_operations_huge;
313}
314
315static const struct vm_operations_struct shm_vm_ops = { 315static const struct vm_operations_struct shm_vm_ops = {
316 .open = shm_open, /* callback for a new vm-area open */ 316 .open = shm_open, /* callback for a new vm-area open */
317 .close = shm_close, /* callback for when the vm-area is released */ 317 .close = shm_close, /* callback for when the vm-area is released */
@@ -889,7 +889,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
889 if (!sfd) 889 if (!sfd)
890 goto out_put_dentry; 890 goto out_put_dentry;
891 891
892 file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations); 892 file = alloc_file(path.mnt, path.dentry, f_mode,
893 is_file_hugepages(shp->shm_file) ?
894 &shm_file_operations_huge :
895 &shm_file_operations);
893 if (!file) 896 if (!file)
894 goto out_free; 897 goto out_free;
895 ima_counts_get(file); 898 ima_counts_get(file);
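
With the file_operations table split, the hugepage test reduces to a single pointer comparison instead of digging through shm_file_data. A sketch of a caller; the helper below is hypothetical:

#include <linux/mm.h>
#include <linux/shm.h>

/* Is this VMA backed by a hugetlb SysV shm segment? */
static bool vma_is_huge_shm(struct vm_area_struct *vma)
{
	return vma->vm_file && is_file_shm_hugepages(vma->vm_file);
}
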
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 7d7014634022..2eb517e23514 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -129,6 +129,7 @@ struct task_struct *kgdb_usethread;
129struct task_struct *kgdb_contthread; 129struct task_struct *kgdb_contthread;
130 130
131int kgdb_single_step; 131int kgdb_single_step;
132pid_t kgdb_sstep_pid;
132 133
133/* Our I/O buffers. */ 134/* Our I/O buffers. */
134static char remcom_in_buffer[BUFMAX]; 135static char remcom_in_buffer[BUFMAX];
@@ -541,12 +542,17 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
541 */ 542 */
542 if (tid == 0 || tid == -1) 543 if (tid == 0 || tid == -1)
543 tid = -atomic_read(&kgdb_active) - 2; 544 tid = -atomic_read(&kgdb_active) - 2;
544 if (tid < 0) { 545 if (tid < -1 && tid > -NR_CPUS - 2) {
545 if (kgdb_info[-tid - 2].task) 546 if (kgdb_info[-tid - 2].task)
546 return kgdb_info[-tid - 2].task; 547 return kgdb_info[-tid - 2].task;
547 else 548 else
548 return idle_task(-tid - 2); 549 return idle_task(-tid - 2);
549 } 550 }
551 if (tid <= 0) {
552 printk(KERN_ERR "KGDB: Internal thread select error\n");
553 dump_stack();
554 return NULL;
555 }
550 556
551 /* 557 /*
552 * find_task_by_pid_ns() does not take the tasklist lock anymore 558 * find_task_by_pid_ns() does not take the tasklist lock anymore
@@ -619,7 +625,8 @@ static void kgdb_flush_swbreak_addr(unsigned long addr)
619static int kgdb_activate_sw_breakpoints(void) 625static int kgdb_activate_sw_breakpoints(void)
620{ 626{
621 unsigned long addr; 627 unsigned long addr;
622 int error = 0; 628 int error;
629 int ret = 0;
623 int i; 630 int i;
624 631
625 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { 632 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -629,13 +636,16 @@ static int kgdb_activate_sw_breakpoints(void)
629 addr = kgdb_break[i].bpt_addr; 636 addr = kgdb_break[i].bpt_addr;
630 error = kgdb_arch_set_breakpoint(addr, 637 error = kgdb_arch_set_breakpoint(addr,
631 kgdb_break[i].saved_instr); 638 kgdb_break[i].saved_instr);
632 if (error) 639 if (error) {
633 return error; 640 ret = error;
 641 printk(KERN_INFO "KGDB: BP install failed: %lx\n", addr);
642 continue;
643 }
634 644
635 kgdb_flush_swbreak_addr(addr); 645 kgdb_flush_swbreak_addr(addr);
636 kgdb_break[i].state = BP_ACTIVE; 646 kgdb_break[i].state = BP_ACTIVE;
637 } 647 }
638 return 0; 648 return ret;
639} 649}
640 650
641static int kgdb_set_sw_break(unsigned long addr) 651static int kgdb_set_sw_break(unsigned long addr)
@@ -682,7 +692,8 @@ static int kgdb_set_sw_break(unsigned long addr)
682static int kgdb_deactivate_sw_breakpoints(void) 692static int kgdb_deactivate_sw_breakpoints(void)
683{ 693{
684 unsigned long addr; 694 unsigned long addr;
685 int error = 0; 695 int error;
696 int ret = 0;
686 int i; 697 int i;
687 698
688 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { 699 for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
@@ -691,13 +702,15 @@ static int kgdb_deactivate_sw_breakpoints(void)
691 addr = kgdb_break[i].bpt_addr; 702 addr = kgdb_break[i].bpt_addr;
692 error = kgdb_arch_remove_breakpoint(addr, 703 error = kgdb_arch_remove_breakpoint(addr,
693 kgdb_break[i].saved_instr); 704 kgdb_break[i].saved_instr);
694 if (error) 705 if (error) {
695 return error; 706 printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
707 ret = error;
708 }
696 709
697 kgdb_flush_swbreak_addr(addr); 710 kgdb_flush_swbreak_addr(addr);
698 kgdb_break[i].state = BP_SET; 711 kgdb_break[i].state = BP_SET;
699 } 712 }
700 return 0; 713 return ret;
701} 714}
702 715
703static int kgdb_remove_sw_break(unsigned long addr) 716static int kgdb_remove_sw_break(unsigned long addr)
@@ -1204,8 +1217,10 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks)
1204 return 1; 1217 return 1;
1205 1218
1206 } else { 1219 } else {
1207 error_packet(remcom_out_buffer, -EINVAL); 1220 kgdb_msg_write("KGDB only knows signal 9 (pass)"
1208 return 0; 1221 " and 15 (pass and disconnect)\n"
1222 "Executing a continue without signal passing\n", 0);
1223 remcom_in_buffer[0] = 'c';
1209 } 1224 }
1210 1225
1211 /* Indicate fall through */ 1226 /* Indicate fall through */
@@ -1395,6 +1410,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
1395 struct kgdb_state kgdb_var; 1410 struct kgdb_state kgdb_var;
1396 struct kgdb_state *ks = &kgdb_var; 1411 struct kgdb_state *ks = &kgdb_var;
1397 unsigned long flags; 1412 unsigned long flags;
1413 int sstep_tries = 100;
1398 int error = 0; 1414 int error = 0;
1399 int i, cpu; 1415 int i, cpu;
1400 1416
@@ -1425,13 +1441,14 @@ acquirelock:
1425 cpu_relax(); 1441 cpu_relax();
1426 1442
1427 /* 1443 /*
1428 * Do not start the debugger connection on this CPU if the last 1444 * For single stepping, try to only enter on the processor
 1429 * instance of the exception handler wanted to come into the 1445 * that was single stepping. To guard against a deadlock, the
 1430 * debugger on a different CPU via a single step 1446 * kernel will retry at most sstep_tries times before
1447 * giving up and continuing on.
1431 */ 1448 */
1432 if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && 1449 if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
1433 atomic_read(&kgdb_cpu_doing_single_step) != cpu) { 1450 (kgdb_info[cpu].task &&
1434 1451 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
1435 atomic_set(&kgdb_active, -1); 1452 atomic_set(&kgdb_active, -1);
1436 touch_softlockup_watchdog(); 1453 touch_softlockup_watchdog();
1437 clocksource_touch_watchdog(); 1454 clocksource_touch_watchdog();
@@ -1524,6 +1541,13 @@ acquirelock:
1524 } 1541 }
1525 1542
1526kgdb_restore: 1543kgdb_restore:
1544 if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
1545 int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
1546 if (kgdb_info[sstep_cpu].task)
1547 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
1548 else
1549 kgdb_sstep_pid = 0;
1550 }
1527 /* Free kgdb_active */ 1551 /* Free kgdb_active */
1528 atomic_set(&kgdb_active, -1); 1552 atomic_set(&kgdb_active, -1);
1529 touch_softlockup_watchdog(); 1553 touch_softlockup_watchdog();
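
For reference, the shadow-thread numbering that getthread() decodes above: GDB thread ids -2, -3, ... name per-CPU "shadow" threads, so CPU n maps to tid -(n + 2), and the new range check admits exactly -NR_CPUS - 1 <= tid <= -2. Exposition-only helpers:

static inline int kgdb_cpu_to_tid(int cpu)
{
	return -cpu - 2;
}

static inline int kgdb_tid_to_cpu(int tid)
{
	return -tid - 2;	/* valid for -NR_CPUS - 1 <= tid <= -2 */
}
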
diff --git a/kernel/resource.c b/kernel/resource.c
index fb11a58b9594..dc15686b7a77 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,35 +308,37 @@ static int find_resource(struct resource *root, struct resource *new,
308 void *alignf_data) 308 void *alignf_data)
309{ 309{
310 struct resource *this = root->child; 310 struct resource *this = root->child;
311 resource_size_t start, end;
311 312
312 new->start = root->start; 313 start = root->start;
313 /* 314 /*
314 * Skip past an allocated resource that starts at 0, since the assignment 315 * Skip past an allocated resource that starts at 0, since the assignment
315 * of this->start - 1 to new->end below would cause an underflow. 316 * of this->start - 1 to new->end below would cause an underflow.
316 */ 317 */
317 if (this && this->start == 0) { 318 if (this && this->start == 0) {
318 new->start = this->end + 1; 319 start = this->end + 1;
319 this = this->sibling; 320 this = this->sibling;
320 } 321 }
321 for(;;) { 322 for(;;) {
322 if (this) 323 if (this)
323 new->end = this->start - 1; 324 end = this->start - 1;
324 else 325 else
325 new->end = root->end; 326 end = root->end;
326 if (new->start < min) 327 if (start < min)
327 new->start = min; 328 start = min;
328 if (new->end > max) 329 if (end > max)
329 new->end = max; 330 end = max;
330 new->start = ALIGN(new->start, align); 331 start = ALIGN(start, align);
331 if (alignf) 332 if (alignf)
332 alignf(alignf_data, new, size, align); 333 alignf(alignf_data, new, size, align);
333 if (new->start < new->end && new->end - new->start >= size - 1) { 334 if (start < end && end - start >= size - 1) {
334 new->end = new->start + size - 1; 335 new->start = start;
336 new->end = start + size - 1;
335 return 0; 337 return 0;
336 } 338 }
337 if (!this) 339 if (!this)
338 break; 340 break;
339 new->start = this->end + 1; 341 start = this->end + 1;
340 this = this->sibling; 342 this = this->sibling;
341 } 343 }
342 return -EBUSY; 344 return -EBUSY;
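
A standalone check of the ALIGN() rounding the gap scan in find_resource() relies on (the kernel macro, valid for power-of-two alignments); buildable as plain userspace C:

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* 0x1234 rounded up to a 0x1000 boundary is 0x2000 */
	printf("%#lx\n", ALIGN(0x1234UL, 0x1000UL));
	return 0;
}
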
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 33bed5e67a21..6438cd5599ee 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -595,37 +595,89 @@ static char *symbol_string(char *buf, char *end, void *ptr,
595} 595}
596 596
597static char *resource_string(char *buf, char *end, struct resource *res, 597static char *resource_string(char *buf, char *end, struct resource *res,
598 struct printf_spec spec) 598 struct printf_spec spec, const char *fmt)
599{ 599{
600#ifndef IO_RSRC_PRINTK_SIZE 600#ifndef IO_RSRC_PRINTK_SIZE
601#define IO_RSRC_PRINTK_SIZE 4 601#define IO_RSRC_PRINTK_SIZE 6
602#endif 602#endif
603 603
604#ifndef MEM_RSRC_PRINTK_SIZE 604#ifndef MEM_RSRC_PRINTK_SIZE
605#define MEM_RSRC_PRINTK_SIZE 8 605#define MEM_RSRC_PRINTK_SIZE 10
606#endif 606#endif
607 struct printf_spec num_spec = { 607 struct printf_spec hex_spec = {
608 .base = 16, 608 .base = 16,
609 .precision = -1, 609 .precision = -1,
610 .flags = SPECIAL | SMALL | ZEROPAD, 610 .flags = SPECIAL | SMALL | ZEROPAD,
611 }; 611 };
612 /* room for the actual numbers, the two "0x", -, [, ] and the final zero */ 612 struct printf_spec dec_spec = {
613 char sym[4*sizeof(resource_size_t) + 8]; 613 .base = 10,
614 .precision = -1,
615 .flags = 0,
616 };
617 struct printf_spec str_spec = {
618 .field_width = -1,
619 .precision = 10,
620 .flags = LEFT,
621 };
622 struct printf_spec flag_spec = {
623 .base = 16,
624 .precision = -1,
625 .flags = SPECIAL | SMALL,
626 };
627
628 /* 32-bit res (sizeof==4): 10 chars in dec, 10 in hex ("0x" + 8)
629 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */
630#define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4)
631#define FLAG_BUF_SIZE (2 * sizeof(res->flags))
632#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]")
633#define RAW_BUF_SIZE sizeof("[mem - flags 0x]")
634 char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE,
635 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)];
636
614 char *p = sym, *pend = sym + sizeof(sym); 637 char *p = sym, *pend = sym + sizeof(sym);
615 int size = -1; 638 int size = -1, addr = 0;
639 int decode = (fmt[0] == 'R') ? 1 : 0;
616 640
617 if (res->flags & IORESOURCE_IO) 641 if (res->flags & IORESOURCE_IO) {
618 size = IO_RSRC_PRINTK_SIZE; 642 size = IO_RSRC_PRINTK_SIZE;
619 else if (res->flags & IORESOURCE_MEM) 643 addr = 1;
644 } else if (res->flags & IORESOURCE_MEM) {
620 size = MEM_RSRC_PRINTK_SIZE; 645 size = MEM_RSRC_PRINTK_SIZE;
646 addr = 1;
647 }
621 648
622 *p++ = '['; 649 *p++ = '[';
623 num_spec.field_width = size; 650 if (res->flags & IORESOURCE_IO)
624 p = number(p, pend, res->start, num_spec); 651 p = string(p, pend, "io ", str_spec);
625 *p++ = '-'; 652 else if (res->flags & IORESOURCE_MEM)
626 p = number(p, pend, res->end, num_spec); 653 p = string(p, pend, "mem ", str_spec);
654 else if (res->flags & IORESOURCE_IRQ)
655 p = string(p, pend, "irq ", str_spec);
656 else if (res->flags & IORESOURCE_DMA)
657 p = string(p, pend, "dma ", str_spec);
658 else {
659 p = string(p, pend, "??? ", str_spec);
660 decode = 0;
661 }
662 hex_spec.field_width = size;
663 p = number(p, pend, res->start, addr ? hex_spec : dec_spec);
664 if (res->start != res->end) {
665 *p++ = '-';
666 p = number(p, pend, res->end, addr ? hex_spec : dec_spec);
667 }
668 if (decode) {
669 if (res->flags & IORESOURCE_MEM_64)
670 p = string(p, pend, " 64bit", str_spec);
671 if (res->flags & IORESOURCE_PREFETCH)
672 p = string(p, pend, " pref", str_spec);
673 if (res->flags & IORESOURCE_DISABLED)
674 p = string(p, pend, " disabled", str_spec);
675 } else {
676 p = string(p, pend, " flags ", str_spec);
677 p = number(p, pend, res->flags, flag_spec);
678 }
627 *p++ = ']'; 679 *p++ = ']';
628 *p = 0; 680 *p = '\0';
629 681
630 return string(buf, end, sym, spec); 682 return string(buf, end, sym, spec);
631} 683}
@@ -801,8 +853,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
801 * - 'f' For simple symbolic function names without offset 853 * - 'f' For simple symbolic function names without offset
802 * - 'S' For symbolic direct pointers with offset 854 * - 'S' For symbolic direct pointers with offset
803 * - 's' For symbolic direct pointers without offset 855 * - 's' For symbolic direct pointers without offset
804 * - 'R' For a struct resource pointer, it prints the range of 856 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
805 * addresses (not the name nor the flags) 857 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
806 * - 'M' For a 6-byte MAC address, it prints the address in the 858 * - 'M' For a 6-byte MAC address, it prints the address in the
807 * usual colon-separated hex notation 859 * usual colon-separated hex notation
808 * - 'm' For a 6-byte MAC address, it prints the hex address without colons 860 * - 'm' For a 6-byte MAC address, it prints the hex address without colons
@@ -833,7 +885,8 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
833 case 'S': 885 case 'S':
834 return symbol_string(buf, end, ptr, spec, *fmt); 886 return symbol_string(buf, end, ptr, spec, *fmt);
835 case 'R': 887 case 'R':
836 return resource_string(buf, end, ptr, spec); 888 case 'r':
889 return resource_string(buf, end, ptr, spec, fmt);
837 case 'M': /* Colon separated: 00:01:02:03:04:05 */ 890 case 'M': /* Colon separated: 00:01:02:03:04:05 */
838 case 'm': /* Contiguous: 000102030405 */ 891 case 'm': /* Contiguous: 000102030405 */
839 return mac_address_string(buf, end, ptr, spec, fmt); 892 return mac_address_string(buf, end, ptr, spec, fmt);
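
Hypothetical call sites for the two new specifiers, reusing the example output from the documentation hunk above; assume a populated struct resource *res:

#include <linux/ioport.h>
#include <linux/kernel.h>

static void report_resource(const struct resource *res)
{
	printk(KERN_INFO "claimed %pR\n", res);	/* [mem 0x0-0x1f 64bit pref] */
	printk(KERN_DEBUG "raw: %pr\n", res);	/* [mem 0x0-0x1f flags 0x201] */
}
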
diff --git a/mm/mmap.c b/mm/mmap.c
index 292ddc3cef9c..ed70a68e882a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -931,13 +931,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
931 if (!(flags & MAP_FIXED)) 931 if (!(flags & MAP_FIXED))
932 addr = round_hint_to_min(addr); 932 addr = round_hint_to_min(addr);
933 933
934 error = arch_mmap_check(addr, len, flags);
935 if (error)
936 return error;
937
938 /* Careful about overflows.. */ 934 /* Careful about overflows.. */
939 len = PAGE_ALIGN(len); 935 len = PAGE_ALIGN(len);
940 if (!len || len > TASK_SIZE) 936 if (!len)
941 return -ENOMEM; 937 return -ENOMEM;
942 938
943 /* offset overflow? */ 939 /* offset overflow? */
@@ -948,24 +944,6 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
948 if (mm->map_count > sysctl_max_map_count) 944 if (mm->map_count > sysctl_max_map_count)
949 return -ENOMEM; 945 return -ENOMEM;
950 946
951 if (flags & MAP_HUGETLB) {
952 struct user_struct *user = NULL;
953 if (file)
954 return -EINVAL;
955
956 /*
957 * VM_NORESERVE is used because the reservations will be
958 * taken when vm_ops->mmap() is called
959 * A dummy user value is used because we are not locking
960 * memory so no accounting is necessary
961 */
962 len = ALIGN(len, huge_page_size(&default_hstate));
963 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
964 &user, HUGETLB_ANONHUGE_INODE);
965 if (IS_ERR(file))
966 return PTR_ERR(file);
967 }
968
969 /* Obtain the address to map to. we verify (or select) it and ensure 947 /* Obtain the address to map to. we verify (or select) it and ensure
970 * that it represents a valid section of the address space. 948 * that it represents a valid section of the address space.
971 */ 949 */
@@ -1455,6 +1433,14 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1455 unsigned long (*get_area)(struct file *, unsigned long, 1433 unsigned long (*get_area)(struct file *, unsigned long,
1456 unsigned long, unsigned long, unsigned long); 1434 unsigned long, unsigned long, unsigned long);
1457 1435
1436 unsigned long error = arch_mmap_check(addr, len, flags);
1437 if (error)
1438 return error;
1439
1440 /* Careful about overflows.. */
1441 if (len > TASK_SIZE)
1442 return -ENOMEM;
1443
1458 get_area = current->mm->get_unmapped_area; 1444 get_area = current->mm->get_unmapped_area;
1459 if (file && file->f_op && file->f_op->get_unmapped_area) 1445 if (file && file->f_op && file->f_op->get_unmapped_area)
1460 get_area = file->f_op->get_unmapped_area; 1446 get_area = file->f_op->get_unmapped_area;
@@ -1999,20 +1985,14 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
1999 if (!len) 1985 if (!len)
2000 return addr; 1986 return addr;
2001 1987
2002 if ((addr + len) > TASK_SIZE || (addr + len) < addr)
2003 return -EINVAL;
2004
2005 if (is_hugepage_only_range(mm, addr, len))
2006 return -EINVAL;
2007
2008 error = security_file_mmap(NULL, 0, 0, 0, addr, 1); 1988 error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
2009 if (error) 1989 if (error)
2010 return error; 1990 return error;
2011 1991
2012 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 1992 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2013 1993
2014 error = arch_mmap_check(addr, len, flags); 1994 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2015 if (error) 1995 if (error & ~PAGE_MASK)
2016 return error; 1996 return error;
2017 1997
2018 /* 1998 /*
diff --git a/mm/mremap.c b/mm/mremap.c
index 97bff2547719..845190898d59 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -261,6 +261,137 @@ static unsigned long move_vma(struct vm_area_struct *vma,
261 return new_addr; 261 return new_addr;
262} 262}
263 263
264static struct vm_area_struct *vma_to_resize(unsigned long addr,
265 unsigned long old_len, unsigned long new_len, unsigned long *p)
266{
267 struct mm_struct *mm = current->mm;
268 struct vm_area_struct *vma = find_vma(mm, addr);
269
270 if (!vma || vma->vm_start > addr)
271 goto Efault;
272
273 if (is_vm_hugetlb_page(vma))
274 goto Einval;
275
276 /* We can't remap across vm area boundaries */
277 if (old_len > vma->vm_end - addr)
278 goto Efault;
279
280 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
281 if (new_len > old_len)
282 goto Efault;
283 }
284
285 if (vma->vm_flags & VM_LOCKED) {
286 unsigned long locked, lock_limit;
287 locked = mm->locked_vm << PAGE_SHIFT;
288 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
289 locked += new_len - old_len;
290 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
291 goto Eagain;
292 }
293
294 if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
295 goto Enomem;
296
297 if (vma->vm_flags & VM_ACCOUNT) {
298 unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
299 if (security_vm_enough_memory(charged))
300 goto Efault;
301 *p = charged;
302 }
303
304 return vma;
305
306Efault: /* very odd choice for most of the cases, but... */
307 return ERR_PTR(-EFAULT);
308Einval:
309 return ERR_PTR(-EINVAL);
310Enomem:
311 return ERR_PTR(-ENOMEM);
312Eagain:
313 return ERR_PTR(-EAGAIN);
314}
315
316static unsigned long mremap_to(unsigned long addr,
317 unsigned long old_len, unsigned long new_addr,
318 unsigned long new_len)
319{
320 struct mm_struct *mm = current->mm;
321 struct vm_area_struct *vma;
322 unsigned long ret = -EINVAL;
323 unsigned long charged = 0;
324 unsigned long map_flags;
325
326 if (new_addr & ~PAGE_MASK)
327 goto out;
328
329 if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
330 goto out;
331
332 /* Check if the location we're moving into overlaps the
333 * old location at all, and fail if it does.
334 */
335 if ((new_addr <= addr) && (new_addr+new_len) > addr)
336 goto out;
337
338 if ((addr <= new_addr) && (addr+old_len) > new_addr)
339 goto out;
340
341 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
342 if (ret)
343 goto out;
344
345 ret = do_munmap(mm, new_addr, new_len);
346 if (ret)
347 goto out;
348
349 if (old_len >= new_len) {
350 ret = do_munmap(mm, addr+new_len, old_len - new_len);
351 if (ret && old_len != new_len)
352 goto out;
353 old_len = new_len;
354 }
355
356 vma = vma_to_resize(addr, old_len, new_len, &charged);
357 if (IS_ERR(vma)) {
358 ret = PTR_ERR(vma);
359 goto out;
360 }
361
362 map_flags = MAP_FIXED;
363 if (vma->vm_flags & VM_MAYSHARE)
364 map_flags |= MAP_SHARED;
365
366 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
367 ((addr - vma->vm_start) >> PAGE_SHIFT),
368 map_flags);
369 if (ret & ~PAGE_MASK)
370 goto out1;
371
372 ret = move_vma(vma, addr, old_len, new_len, new_addr);
373 if (!(ret & ~PAGE_MASK))
374 goto out;
375out1:
376 vm_unacct_memory(charged);
377
378out:
379 return ret;
380}
381
382static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
383{
384 unsigned long end = vma->vm_end + delta;
385 if (end < vma->vm_end) /* overflow */
386 return 0;
387 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
388 return 0;
389 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
390 0, MAP_FIXED) & ~PAGE_MASK)
391 return 0;
392 return 1;
393}
394
264/* 395/*
265 * Expand (or shrink) an existing mapping, potentially moving it at the 396 * Expand (or shrink) an existing mapping, potentially moving it at the
266 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space) 397 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
@@ -294,32 +425,10 @@ unsigned long do_mremap(unsigned long addr,
294 if (!new_len) 425 if (!new_len)
295 goto out; 426 goto out;
296 427
297 /* new_addr is only valid if MREMAP_FIXED is specified */
298 if (flags & MREMAP_FIXED) { 428 if (flags & MREMAP_FIXED) {
299 if (new_addr & ~PAGE_MASK) 429 if (flags & MREMAP_MAYMOVE)
300 goto out; 430 ret = mremap_to(addr, old_len, new_addr, new_len);
301 if (!(flags & MREMAP_MAYMOVE)) 431 goto out;
302 goto out;
303
304 if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
305 goto out;
306
307 /* Check if the location we're moving into overlaps the
308 * old location at all, and fail if it does.
309 */
310 if ((new_addr <= addr) && (new_addr+new_len) > addr)
311 goto out;
312
313 if ((addr <= new_addr) && (addr+old_len) > new_addr)
314 goto out;
315
316 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
317 if (ret)
318 goto out;
319
320 ret = do_munmap(mm, new_addr, new_len);
321 if (ret)
322 goto out;
323 } 432 }
324 433
325 /* 434 /*
@@ -332,60 +441,23 @@ unsigned long do_mremap(unsigned long addr,
332 if (ret && old_len != new_len) 441 if (ret && old_len != new_len)
333 goto out; 442 goto out;
334 ret = addr; 443 ret = addr;
335 if (!(flags & MREMAP_FIXED) || (new_addr == addr)) 444 goto out;
336 goto out;
337 old_len = new_len;
338 } 445 }
339 446
340 /* 447 /*
341 * Ok, we need to grow.. or relocate. 448 * Ok, we need to grow..
342 */ 449 */
343 ret = -EFAULT; 450 vma = vma_to_resize(addr, old_len, new_len, &charged);
344 vma = find_vma(mm, addr); 451 if (IS_ERR(vma)) {
345 if (!vma || vma->vm_start > addr) 452 ret = PTR_ERR(vma);
346 goto out;
347 if (is_vm_hugetlb_page(vma)) {
348 ret = -EINVAL;
349 goto out;
350 }
351 /* We can't remap across vm area boundaries */
352 if (old_len > vma->vm_end - addr)
353 goto out;
354 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
355 if (new_len > old_len)
356 goto out;
357 }
358 if (vma->vm_flags & VM_LOCKED) {
359 unsigned long locked, lock_limit;
360 locked = mm->locked_vm << PAGE_SHIFT;
361 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
362 locked += new_len - old_len;
363 ret = -EAGAIN;
364 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
365 goto out;
366 }
367 if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
368 ret = -ENOMEM;
369 goto out; 453 goto out;
370 } 454 }
371 455
372 if (vma->vm_flags & VM_ACCOUNT) {
373 charged = (new_len - old_len) >> PAGE_SHIFT;
374 if (security_vm_enough_memory(charged))
375 goto out_nc;
376 }
377
378 /* old_len exactly to the end of the area.. 456 /* old_len exactly to the end of the area..
379 * And we're not relocating the area.
380 */ 457 */
381 if (old_len == vma->vm_end - addr && 458 if (old_len == vma->vm_end - addr) {
382 !((flags & MREMAP_FIXED) && (addr != new_addr)) &&
383 (old_len != new_len || !(flags & MREMAP_MAYMOVE))) {
384 unsigned long max_addr = TASK_SIZE;
385 if (vma->vm_next)
386 max_addr = vma->vm_next->vm_start;
387 /* can we just expand the current mapping? */ 459 /* can we just expand the current mapping? */
388 if (max_addr - addr >= new_len) { 460 if (vma_expandable(vma, new_len - old_len)) {
389 int pages = (new_len - old_len) >> PAGE_SHIFT; 461 int pages = (new_len - old_len) >> PAGE_SHIFT;
390 462
391 vma_adjust(vma, vma->vm_start, 463 vma_adjust(vma, vma->vm_start,
@@ -409,28 +481,27 @@ unsigned long do_mremap(unsigned long addr,
409 */ 481 */
410 ret = -ENOMEM; 482 ret = -ENOMEM;
411 if (flags & MREMAP_MAYMOVE) { 483 if (flags & MREMAP_MAYMOVE) {
412 if (!(flags & MREMAP_FIXED)) { 484 unsigned long map_flags = 0;
413 unsigned long map_flags = 0; 485 if (vma->vm_flags & VM_MAYSHARE)
414 if (vma->vm_flags & VM_MAYSHARE) 486 map_flags |= MAP_SHARED;
415 map_flags |= MAP_SHARED; 487
416 488 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
417 new_addr = get_unmapped_area(vma->vm_file, 0, new_len, 489 vma->vm_pgoff +
418 vma->vm_pgoff, map_flags); 490 ((addr - vma->vm_start) >> PAGE_SHIFT),
419 if (new_addr & ~PAGE_MASK) { 491 map_flags);
420 ret = new_addr; 492 if (new_addr & ~PAGE_MASK) {
421 goto out; 493 ret = new_addr;
422 } 494 goto out;
423
424 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
425 if (ret)
426 goto out;
427 } 495 }
496
497 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
498 if (ret)
499 goto out;
428 ret = move_vma(vma, addr, old_len, new_len, new_addr); 500 ret = move_vma(vma, addr, old_len, new_len, new_addr);
429 } 501 }
430out: 502out:
431 if (ret & ~PAGE_MASK) 503 if (ret & ~PAGE_MASK)
432 vm_unacct_memory(charged); 504 vm_unacct_memory(charged);
433out_nc:
434 return ret; 505 return ret;
435} 506}
436 507
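
A userspace view of the mremap_to() path factored out above: MREMAP_FIXED is honoured only together with MREMAP_MAYMOVE, and the kernel unmaps whatever occupies the target range first. The chosen target address here is arbitrary:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
	size_t len = 2 * 4096;
	char *old, *new;

	old = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED)
		return 1;

	/* move the mapping 16 pages up; must not overlap the source */
	new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_FIXED,
		     old + 16 * 4096);
	if (new == MAP_FAILED)
		return 1;

	printf("moved %p -> %p\n", (void *)old, (void *)new);
	return 0;
}
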
diff --git a/mm/util.c b/mm/util.c
index 7c35ad95f927..b377ce430803 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,10 @@
4#include <linux/module.h> 4#include <linux/module.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/hugetlb.h>
8#include <linux/syscalls.h>
9#include <linux/mman.h>
10#include <linux/file.h>
7#include <asm/uaccess.h> 11#include <asm/uaccess.h>
8 12
9#define CREATE_TRACE_POINTS 13#define CREATE_TRACE_POINTS
@@ -268,6 +272,46 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
268} 272}
269EXPORT_SYMBOL_GPL(get_user_pages_fast); 273EXPORT_SYMBOL_GPL(get_user_pages_fast);
270 274
275SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
276 unsigned long, prot, unsigned long, flags,
277 unsigned long, fd, unsigned long, pgoff)
278{
279 struct file * file = NULL;
280 unsigned long retval = -EBADF;
281
282 if (!(flags & MAP_ANONYMOUS)) {
283 if (unlikely(flags & MAP_HUGETLB))
284 return -EINVAL;
285 file = fget(fd);
286 if (!file)
287 goto out;
288 } else if (flags & MAP_HUGETLB) {
289 struct user_struct *user = NULL;
290 /*
291 * VM_NORESERVE is used because the reservations will be
292 * taken when vm_ops->mmap() is called
293 * A dummy user value is used because we are not locking
294 * memory so no accounting is necessary
295 */
296 len = ALIGN(len, huge_page_size(&default_hstate));
297 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
298 &user, HUGETLB_ANONHUGE_INODE);
299 if (IS_ERR(file))
300 return PTR_ERR(file);
301 }
302
303 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
304
305 down_write(&current->mm->mmap_sem);
306 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
307 up_write(&current->mm->mmap_sem);
308
309 if (file)
310 fput(file);
311out:
312 return retval;
313}
314
271/* Tracepoints definitions. */ 315/* Tracepoints definitions. */
272EXPORT_TRACEPOINT_SYMBOL(kmalloc); 316EXPORT_TRACEPOINT_SYMBOL(kmalloc);
273EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); 317EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
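
What this consolidation enables on the arch side: a byte-offset mmap syscall reduces to offset validation plus a call to the common entry point. A sketch modeled on the x86 wrapper; the syscall name is illustrative:

#include <linux/mm.h>
#include <linux/syscalls.h>

SYSCALL_DEFINE6(my_arch_mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;

	return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
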
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 62a9025cdcc7..6f426afbc522 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -104,7 +104,7 @@ static void device_id_check(const char *modname, const char *device_id,
104static void do_usb_entry(struct usb_device_id *id, 104static void do_usb_entry(struct usb_device_id *id,
105 unsigned int bcdDevice_initial, int bcdDevice_initial_digits, 105 unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
106 unsigned char range_lo, unsigned char range_hi, 106 unsigned char range_lo, unsigned char range_hi,
107 struct module *mod) 107 unsigned char max, struct module *mod)
108{ 108{
109 char alias[500]; 109 char alias[500];
110 strcpy(alias, "usb:"); 110 strcpy(alias, "usb:");
@@ -118,9 +118,22 @@ static void do_usb_entry(struct usb_device_id *id,
118 sprintf(alias + strlen(alias), "%0*X", 118 sprintf(alias + strlen(alias), "%0*X",
119 bcdDevice_initial_digits, bcdDevice_initial); 119 bcdDevice_initial_digits, bcdDevice_initial);
120 if (range_lo == range_hi) 120 if (range_lo == range_hi)
121 sprintf(alias + strlen(alias), "%u", range_lo); 121 sprintf(alias + strlen(alias), "%X", range_lo);
122 else if (range_lo > 0 || range_hi < 9) 122 else if (range_lo > 0 || range_hi < max) {
123 sprintf(alias + strlen(alias), "[%u-%u]", range_lo, range_hi); 123 if (range_lo > 0x9 || range_hi < 0xA)
124 sprintf(alias + strlen(alias),
125 "[%X-%X]",
126 range_lo,
127 range_hi);
128 else {
129 sprintf(alias + strlen(alias),
130 range_lo < 0x9 ? "[%X-9" : "[%X",
131 range_lo);
132 sprintf(alias + strlen(alias),
 133 range_hi > 0xA ? "A-%X]" : "%X]",
 134 range_hi);
135 }
136 }
124 if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1)) 137 if (bcdDevice_initial_digits < (sizeof(id->bcdDevice_lo) * 2 - 1))
125 strcat(alias, "*"); 138 strcat(alias, "*");
126 139
@@ -147,10 +160,49 @@ static void do_usb_entry(struct usb_device_id *id,
147 "MODULE_ALIAS(\"%s\");\n", alias); 160 "MODULE_ALIAS(\"%s\");\n", alias);
148} 161}
149 162
163/* Handles increment/decrement of BCD formatted integers */
164/* Returns the previous value, so it works like i++ or i-- */
165static unsigned int incbcd(unsigned int *bcd,
166 int inc,
167 unsigned char max,
168 size_t chars)
169{
170 unsigned int init = *bcd, i, j;
171 unsigned long long c, dec = 0;
172
173 /* If bcd is not in BCD format, just increment */
174 if (max > 0x9) {
175 *bcd += inc;
176 return init;
177 }
178
179 /* Convert BCD to Decimal */
 180 for (i = 0; i < chars; i++) {
181 c = (*bcd >> (i << 2)) & 0xf;
182 c = c > 9 ? 9 : c; /* force to bcd just in case */
 183 for (j = 0; j < i; j++)
184 c = c * 10;
185 dec += c;
186 }
187
188 /* Do our increment/decrement */
189 dec += inc;
190 *bcd = 0;
191
192 /* Convert back to BCD */
 193 for (i = 0; i < chars; i++) {
 194 for (c = 1, j = 0; j < i; j++)
195 c = c * 10;
196 c = (dec / c) % 10;
197 *bcd += c << (i << 2);
198 }
199 return init;
200}
201
150static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod) 202static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
151{ 203{
152 unsigned int devlo, devhi; 204 unsigned int devlo, devhi;
153 unsigned char chi, clo; 205 unsigned char chi, clo, max;
154 int ndigits; 206 int ndigits;
155 207
156 id->match_flags = TO_NATIVE(id->match_flags); 208 id->match_flags = TO_NATIVE(id->match_flags);
@@ -162,6 +214,17 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
162 devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ? 214 devhi = id->match_flags & USB_DEVICE_ID_MATCH_DEV_HI ?
163 TO_NATIVE(id->bcdDevice_hi) : ~0x0U; 215 TO_NATIVE(id->bcdDevice_hi) : ~0x0U;
164 216
217 /* Figure out if this entry is in bcd or hex format */
218 max = 0x9; /* Default to decimal format */
219 for (ndigits = 0 ; ndigits < sizeof(id->bcdDevice_lo) * 2 ; ndigits++) {
220 clo = (devlo >> (ndigits << 2)) & 0xf;
221 chi = ((devhi > 0x9999 ? 0x9999 : devhi) >> (ndigits << 2)) & 0xf;
222 if (clo > max || chi > max) {
223 max = 0xf;
224 break;
225 }
226 }
227
165 /* 228 /*
166 * Some modules (visor) have empty slots as placeholder for 229 * Some modules (visor) have empty slots as placeholder for
167 * run-time specification that results in catch-all alias 230 * run-time specification that results in catch-all alias
@@ -173,21 +236,27 @@ static void do_usb_entry_multi(struct usb_device_id *id, struct module *mod)
173 for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) { 236 for (ndigits = sizeof(id->bcdDevice_lo) * 2 - 1; devlo <= devhi; ndigits--) {
174 clo = devlo & 0xf; 237 clo = devlo & 0xf;
175 chi = devhi & 0xf; 238 chi = devhi & 0xf;
176 if (chi > 9) /* it's bcd not hex */ 239 if (chi > max) /* If we are in bcd mode, truncate if necessary */
177 chi = 9; 240 chi = max;
178 devlo >>= 4; 241 devlo >>= 4;
179 devhi >>= 4; 242 devhi >>= 4;
180 243
181 if (devlo == devhi || !ndigits) { 244 if (devlo == devhi || !ndigits) {
182 do_usb_entry(id, devlo, ndigits, clo, chi, mod); 245 do_usb_entry(id, devlo, ndigits, clo, chi, max, mod);
183 break; 246 break;
184 } 247 }
185 248
186 if (clo > 0) 249 if (clo > 0x0)
187 do_usb_entry(id, devlo++, ndigits, clo, 9, mod); 250 do_usb_entry(id,
188 251 incbcd(&devlo, 1, max,
189 if (chi < 9) 252 sizeof(id->bcdDevice_lo) * 2),
190 do_usb_entry(id, devhi--, ndigits, 0, chi, mod); 253 ndigits, clo, max, max, mod);
254
255 if (chi < max)
256 do_usb_entry(id,
257 incbcd(&devhi, -1, max,
258 sizeof(id->bcdDevice_lo) * 2),
259 ndigits, 0x0, chi, max, mod);
191 } 260 }
192} 261}
193 262
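
A standalone check of the BCD stepping incbcd() implements: incrementing 0x0199 must carry decimally to 0x0200, not 0x019a. Plain userspace C, using the same convert-to-decimal-and-back approach:

#include <stdio.h>
#include <stddef.h>

static unsigned int bcd_add(unsigned int bcd, int inc, size_t digits)
{
	unsigned long dec = 0, pow = 1;
	unsigned int out = 0;
	size_t i;

	/* BCD -> decimal */
	for (i = 0; i < digits; i++, pow *= 10)
		dec += ((bcd >> (4 * i)) & 0xf) * pow;

	dec += inc;

	/* decimal -> BCD */
	for (i = 0, pow = 1; i < digits; i++, pow *= 10)
		out |= ((dec / pow) % 10) << (4 * i);
	return out;
}

int main(void)
{
	printf("%04x\n", bcd_add(0x0199, 1, 4));	/* prints 0200 */
	return 0;
}
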